1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31 #include <asm/compat.h>
32 #include <asm/smp.h>
33 #include <asm/alternative.h>
34
35 #include "perf_event.h"
36
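/*
 * Debug aid: change this to #if 1 to have every counter MSR write
 * logged via trace_printk() before it is performed.
 */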
37 #if 0
38 #undef wrmsrl
39 #define wrmsrl(msr, val)                                        \
40 do {                                                            \
41         trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
42                         (unsigned long)(val));                  \
43         native_write_msr((msr), (u32)((u64)(val)),              \
44                         (u32)((u64)(val) >> 32));               \
45 } while (0)
46 #endif
47
48 struct x86_pmu x86_pmu __read_mostly;
49
50 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
51         .enabled = 1,
52 };
53
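/*
 * Generic-cache-event translation tables: indexed by (cache, op, result),
 * they give the raw event code for each combination.  set_ext_hw_attr()
 * treats 0 as "not supported" and -1 as "invalid combination";
 * hw_cache_extra_regs supplies any matching extra-register value
 * (e.g. the offcore-response MSRs on Intel).
 */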
54 u64 __read_mostly hw_cache_event_ids
55                                 [PERF_COUNT_HW_CACHE_MAX]
56                                 [PERF_COUNT_HW_CACHE_OP_MAX]
57                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
58 u64 __read_mostly hw_cache_extra_regs
59                                 [PERF_COUNT_HW_CACHE_MAX]
60                                 [PERF_COUNT_HW_CACHE_OP_MAX]
61                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
62
63 /*
64  * Propagate event elapsed time into the generic event.
65  * Can only be executed on the CPU where the event is active.
66  * Returns the new raw count.
67  */
68 u64 x86_perf_event_update(struct perf_event *event)
69 {
70         struct hw_perf_event *hwc = &event->hw;
71         int shift = 64 - x86_pmu.cntval_bits;
72         u64 prev_raw_count, new_raw_count;
73         int idx = hwc->idx;
74         s64 delta;
75
76         if (idx == X86_PMC_IDX_FIXED_BTS)
77                 return 0;
78
79         /*
80          * Careful: an NMI might modify the previous event value.
81          *
82          * Our tactic to handle this is to first atomically read and
83          * exchange a new raw count - then add that new-prev delta
84          * count to the generic event atomically:
85          */
86 again:
87         prev_raw_count = local64_read(&hwc->prev_count);
88         rdmsrl(hwc->event_base, new_raw_count);
89
90         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
91                                         new_raw_count) != prev_raw_count)
92                 goto again;
93
94         /*
95          * Now we have the new raw value and have updated the prev
96          * timestamp already. We can now calculate the elapsed delta
97          * (event-)time and add that to the generic event.
98          *
99          * Careful, not all hw sign-extends above the physical width
100          * of the count.
101          */
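        /*
         * Worked example: with 48-bit counters, shift is 16.  Shifting
         * both raw values up by 16 and the (signed) difference back down
         * computes the delta modulo 2^48 and sign-extends it to 64 bit,
         * so a counter that wrapped still yields the right increment.
         */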
102         delta = (new_raw_count << shift) - (prev_raw_count << shift);
103         delta >>= shift;
104
105         local64_add(delta, &event->count);
106         local64_sub(delta, &hwc->period_left);
107
108         return new_raw_count;
109 }
110
111 /*
112  * Find and validate any extra registers to set up.
113  */
114 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
115 {
116         struct hw_perf_event_extra *reg;
117         struct extra_reg *er;
118
119         reg = &event->hw.extra_reg;
120
121         if (!x86_pmu.extra_regs)
122                 return 0;
123
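        /*
         * attr->config1 carries the value for the extra MSR (e.g. the
         * offcore-response MSRs on Intel); it must fit within the
         * valid_mask the PMU advertises for that register.
         */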
124         for (er = x86_pmu.extra_regs; er->msr; er++) {
125                 if (er->event != (config & er->config_mask))
126                         continue;
127                 if (event->attr.config1 & ~er->valid_mask)
128                         return -EINVAL;
129
130                 reg->idx = er->idx;
131                 reg->config = event->attr.config1;
132                 reg->reg = er->msr;
133                 break;
134         }
135         return 0;
136 }
137
138 static atomic_t active_events;
139 static DEFINE_MUTEX(pmc_reserve_mutex);
140
141 #ifdef CONFIG_X86_LOCAL_APIC
142
143 static bool reserve_pmc_hardware(void)
144 {
145         int i;
146
147         for (i = 0; i < x86_pmu.num_counters; i++) {
148                 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
149                         goto perfctr_fail;
150         }
151
152         for (i = 0; i < x86_pmu.num_counters; i++) {
153                 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
154                         goto eventsel_fail;
155         }
156
157         return true;
158
159 eventsel_fail:
160         for (i--; i >= 0; i--)
161                 release_evntsel_nmi(x86_pmu_config_addr(i));
162
163         i = x86_pmu.num_counters;
164
165 perfctr_fail:
166         for (i--; i >= 0; i--)
167                 release_perfctr_nmi(x86_pmu_event_addr(i));
168
169         return false;
170 }
171
172 static void release_pmc_hardware(void)
173 {
174         int i;
175
176         for (i = 0; i < x86_pmu.num_counters; i++) {
177                 release_perfctr_nmi(x86_pmu_event_addr(i));
178                 release_evntsel_nmi(x86_pmu_config_addr(i));
179         }
180 }
181
182 #else
183
184 static bool reserve_pmc_hardware(void) { return true; }
185 static void release_pmc_hardware(void) {}
186
187 #endif
188
189 static bool check_hw_exists(void)
190 {
191         u64 val, val_new = 0;
192         int i, reg, ret = 0;
193
194         /*
195          * Check to see if the BIOS enabled any of the counters; if so,
196          * complain and bail.
197          */
198         for (i = 0; i < x86_pmu.num_counters; i++) {
199                 reg = x86_pmu_config_addr(i);
200                 ret = rdmsrl_safe(reg, &val);
201                 if (ret)
202                         goto msr_fail;
203                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
204                         goto bios_fail;
205         }
206
207         if (x86_pmu.num_counters_fixed) {
208                 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
209                 ret = rdmsrl_safe(reg, &val);
210                 if (ret)
211                         goto msr_fail;
212                 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
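                        /*
                         * Each fixed counter owns a 4-bit field in
                         * FIXED_CTR_CTRL; bits 0-1 are the OS/USR enable
                         * bits, so any set bit means the BIOS left the
                         * counter running.
                         */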
213                         if (val & (0x03 << i*4))
214                                 goto bios_fail;
215                 }
216         }
217
218         /*
219          * Now write a value and read it back to see if it matches;
220          * this is needed to detect certain hardware emulators (qemu/kvm)
221          * that don't trap on the MSR access and always return 0s.
222          */
223         val = 0xabcdUL;
224         ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
225         ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
226         if (ret || val != val_new)
227                 goto msr_fail;
228
229         return true;
230
231 bios_fail:
232         /*
233          * We still allow the PMU driver to operate:
234          */
235         printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
236         printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
237
238         return true;
239
240 msr_fail:
241         printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
242
243         return false;
244 }
245
246 static void hw_perf_event_destroy(struct perf_event *event)
247 {
248         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
249                 release_pmc_hardware();
250                 release_ds_buffers();
251                 mutex_unlock(&pmc_reserve_mutex);
252         }
253 }
254
255 static inline int x86_pmu_initialized(void)
256 {
257         return x86_pmu.handle_irq != NULL;
258 }
259
260 static inline int
261 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
262 {
263         struct perf_event_attr *attr = &event->attr;
264         unsigned int cache_type, cache_op, cache_result;
265         u64 config, val;
266
267         config = attr->config;
268
269         cache_type = (config >>  0) & 0xff;
270         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
271                 return -EINVAL;
272
273         cache_op = (config >>  8) & 0xff;
274         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
275                 return -EINVAL;
276
277         cache_result = (config >> 16) & 0xff;
278         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
279                 return -EINVAL;
280
281         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
282
283         if (val == 0)
284                 return -ENOENT;
285
286         if (val == -1)
287                 return -EINVAL;
288
289         hwc->config |= val;
290         attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
291         return x86_pmu_extra_regs(val, event);
292 }
293
294 int x86_setup_perfctr(struct perf_event *event)
295 {
296         struct perf_event_attr *attr = &event->attr;
297         struct hw_perf_event *hwc = &event->hw;
298         u64 config;
299
300         if (!is_sampling_event(event)) {
301                 hwc->sample_period = x86_pmu.max_period;
302                 hwc->last_period = hwc->sample_period;
303                 local64_set(&hwc->period_left, hwc->sample_period);
304         } else {
305                 /*
306                  * If we have a PMU initialized but no APIC
307                  * interrupts, we cannot sample hardware
308                  * events (user-space has to fall back and
309                  * sample via a hrtimer based software event):
310                  */
311                 if (!x86_pmu.apic)
312                         return -EOPNOTSUPP;
313         }
314
315         if (attr->type == PERF_TYPE_RAW)
316                 return x86_pmu_extra_regs(event->attr.config, event);
317
318         if (attr->type == PERF_TYPE_HW_CACHE)
319                 return set_ext_hw_attr(hwc, event);
320
321         if (attr->config >= x86_pmu.max_events)
322                 return -EINVAL;
323
324         /*
325          * The generic map:
326          */
327         config = x86_pmu.event_map(attr->config);
328
329         if (config == 0)
330                 return -ENOENT;
331
332         if (config == -1LL)
333                 return -EINVAL;
334
335         /*
336          * Branch tracing:
337          */
338         if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
339             !attr->freq && hwc->sample_period == 1) {
340                 /* BTS is not supported by this architecture. */
341                 if (!x86_pmu.bts_active)
342                         return -EOPNOTSUPP;
343
344                 /* BTS is currently only allowed for user-mode. */
345                 if (!attr->exclude_kernel)
346                         return -EOPNOTSUPP;
347         }
348
349         hwc->config |= config;
350
351         return 0;
352 }
353
354 int x86_pmu_hw_config(struct perf_event *event)
355 {
356         if (event->attr.precise_ip) {
357                 int precise = 0;
358
359                 /* Support for constant skid */
360                 if (x86_pmu.pebs_active) {
361                         precise++;
362
363                         /* Support for IP fixup */
364                         if (x86_pmu.lbr_nr)
365                                 precise++;
366                 }
367
368                 if (event->attr.precise_ip > precise)
369                         return -EOPNOTSUPP;
370         }
371
372         /*
373          * Generate PMC IRQs:
374          * (keep 'enabled' bit clear for now)
375          */
376         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
377
378         /*
379          * Count user and OS events unless requested not to
380          */
381         if (!event->attr.exclude_user)
382                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
383         if (!event->attr.exclude_kernel)
384                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
385
386         if (event->attr.type == PERF_TYPE_RAW)
387                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
388
389         return x86_setup_perfctr(event);
390 }
391
392 /*
393  * Setup the hardware configuration for a given attr_type
394  */
395 static int __x86_pmu_event_init(struct perf_event *event)
396 {
397         int err;
398
399         if (!x86_pmu_initialized())
400                 return -ENODEV;
401
402         err = 0;
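        /*
         * The first event reserves the PMC hardware and the DS buffers;
         * active_events counts live events so that hw_perf_event_destroy()
         * can release both again when the last event goes away.
         */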
403         if (!atomic_inc_not_zero(&active_events)) {
404                 mutex_lock(&pmc_reserve_mutex);
405                 if (atomic_read(&active_events) == 0) {
406                         if (!reserve_pmc_hardware())
407                                 err = -EBUSY;
408                         else
409                                 reserve_ds_buffers();
410                 }
411                 if (!err)
412                         atomic_inc(&active_events);
413                 mutex_unlock(&pmc_reserve_mutex);
414         }
415         if (err)
416                 return err;
417
418         event->destroy = hw_perf_event_destroy;
419
420         event->hw.idx = -1;
421         event->hw.last_cpu = -1;
422         event->hw.last_tag = ~0ULL;
423
424         /* mark unused */
425         event->hw.extra_reg.idx = EXTRA_REG_NONE;
426
427         return x86_pmu.hw_config(event);
428 }
429
430 void x86_pmu_disable_all(void)
431 {
432         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
433         int idx;
434
435         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
436                 u64 val;
437
438                 if (!test_bit(idx, cpuc->active_mask))
439                         continue;
440                 rdmsrl(x86_pmu_config_addr(idx), val);
441                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
442                         continue;
443                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
444                 wrmsrl(x86_pmu_config_addr(idx), val);
445         }
446 }
447
448 static void x86_pmu_disable(struct pmu *pmu)
449 {
450         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
451
452         if (!x86_pmu_initialized())
453                 return;
454
455         if (!cpuc->enabled)
456                 return;
457
458         cpuc->n_added = 0;
459         cpuc->enabled = 0;
460         barrier();
461
462         x86_pmu.disable_all();
463 }
464
465 void x86_pmu_enable_all(int added)
466 {
467         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
468         int idx;
469
470         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
471                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
472
473                 if (!test_bit(idx, cpuc->active_mask))
474                         continue;
475
476                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
477         }
478 }
479
480 static struct pmu pmu;
481
482 static inline int is_x86_event(struct perf_event *event)
483 {
484         return event->pmu == &pmu;
485 }
486
487 /*
488  * Event scheduler state:
489  *
490  * Assign events by iterating over all events and counters, beginning
491  * with the events of least weight. Keep the current iterator
492  * state in struct sched_state.
493  */
494 struct sched_state {
495         int     weight;
496         int     event;          /* event index */
497         int     counter;        /* counter index */
498         int     unassigned;     /* number of events to be assigned left */
499         unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
500 };
501
502 /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
503 #define SCHED_STATES_MAX        2
504
505 struct perf_sched {
506         int                     max_weight;
507         int                     max_events;
508         struct event_constraint **constraints;
509         struct sched_state      state;
510         int                     saved_states;
511         struct sched_state      saved[SCHED_STATES_MAX];
512 };
513
514 /*
515  * Initialize an iterator that runs through all events and counters.
516  */
517 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
518                             int num, int wmin, int wmax)
519 {
520         int idx;
521
522         memset(sched, 0, sizeof(*sched));
523         sched->max_events       = num;
524         sched->max_weight       = wmax;
525         sched->constraints      = c;
526
527         for (idx = 0; idx < num; idx++) {
528                 if (c[idx]->weight == wmin)
529                         break;
530         }
531
532         sched->state.event      = idx;          /* start with min weight */
533         sched->state.weight     = wmin;
534         sched->state.unassigned = num;
535 }
536
537 static void perf_sched_save_state(struct perf_sched *sched)
538 {
539         if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
540                 return;
541
542         sched->saved[sched->saved_states] = sched->state;
543         sched->saved_states++;
544 }
545
546 static bool perf_sched_restore_state(struct perf_sched *sched)
547 {
548         if (!sched->saved_states)
549                 return false;
550
551         sched->saved_states--;
552         sched->state = sched->saved[sched->saved_states];
553
554         /* continue with next counter: */
555         clear_bit(sched->state.counter++, sched->state.used);
556
557         return true;
558 }
559
560 /*
561  * Select a counter for the current event to schedule. Return true on
562  * success.
563  */
564 static bool __perf_sched_find_counter(struct perf_sched *sched)
565 {
566         struct event_constraint *c;
567         int idx;
568
569         if (!sched->state.unassigned)
570                 return false;
571
572         if (sched->state.event >= sched->max_events)
573                 return false;
574
575         c = sched->constraints[sched->state.event];
576
577         /* Prefer fixed purpose counters */
578         if (x86_pmu.num_counters_fixed) {
579                 idx = X86_PMC_IDX_FIXED;
580                 for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
581                         if (!__test_and_set_bit(idx, sched->state.used))
582                                 goto done;
583                 }
584         }
585         /* Grab the first unused counter starting with idx */
586         idx = sched->state.counter;
587         for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
588                 if (!__test_and_set_bit(idx, sched->state.used))
589                         goto done;
590         }
591
592         return false;
593
594 done:
595         sched->state.counter = idx;
596
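        /*
         * A counter taken from a constraint marked ->overlap may have to
         * be given back later, so remember this state and let
         * perf_sched_restore_state() retry with the next counter.
         */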
597         if (c->overlap)
598                 perf_sched_save_state(sched);
599
600         return true;
601 }
602
603 static bool perf_sched_find_counter(struct perf_sched *sched)
604 {
605         while (!__perf_sched_find_counter(sched)) {
606                 if (!perf_sched_restore_state(sched))
607                         return false;
608         }
609
610         return true;
611 }
612
613 /*
614  * Go through all unassigned events and find the next one to schedule.
615  * Take events with the least weight first. Return true on success.
616  */
617 static bool perf_sched_next_event(struct perf_sched *sched)
618 {
619         struct event_constraint *c;
620
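        /* Nothing left to assign, or the event just placed was the last one. */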
621         if (!sched->state.unassigned || !--sched->state.unassigned)
622                 return false;
623
624         do {
625                 /* next event */
626                 sched->state.event++;
627                 if (sched->state.event >= sched->max_events) {
628                         /* next weight */
629                         sched->state.event = 0;
630                         sched->state.weight++;
631                         if (sched->state.weight > sched->max_weight)
632                                 return false;
633                 }
634                 c = sched->constraints[sched->state.event];
635         } while (c->weight != sched->state.weight);
636
637         sched->state.counter = 0;       /* start with first counter */
638
639         return true;
640 }
641
642 /*
643  * Assign a counter for each event.
644  */
645 static int perf_assign_events(struct event_constraint **constraints, int n,
646                               int wmin, int wmax, int *assign)
647 {
648         struct perf_sched sched;
649
650         perf_sched_init(&sched, constraints, n, wmin, wmax);
651
652         do {
653                 if (!perf_sched_find_counter(&sched))
654                         break;  /* failed */
655                 if (assign)
656                         assign[sched.state.event] = sched.state.counter;
657         } while (perf_sched_next_event(&sched));
658
659         return sched.state.unassigned;
660 }
661
662 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
663 {
664         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
665         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
666         int i, wmin, wmax, num = 0;
667         struct hw_perf_event *hwc;
668
669         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
670
671         for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
672                 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
673                 constraints[i] = c;
674                 wmin = min(wmin, c->weight);
675                 wmax = max(wmax, c->weight);
676         }
677
678         /*
679          * fastpath, try to reuse previous register
680          */
681         for (i = 0; i < n; i++) {
682                 hwc = &cpuc->event_list[i]->hw;
683                 c = constraints[i];
684
685                 /* never assigned */
686                 if (hwc->idx == -1)
687                         break;
688
689                 /* constraint still honored */
690                 if (!test_bit(hwc->idx, c->idxmsk))
691                         break;
692
693                 /* not already used */
694                 if (test_bit(hwc->idx, used_mask))
695                         break;
696
697                 __set_bit(hwc->idx, used_mask);
698                 if (assign)
699                         assign[i] = hwc->idx;
700         }
701
702         /* slow path */
703         if (i != n)
704                 num = perf_assign_events(constraints, n, wmin, wmax, assign);
705
706         /*
707          * scheduling failed or is just a simulation,
708          * free resources if necessary
709          */
710         if (!assign || num) {
711                 for (i = 0; i < n; i++) {
712                         if (x86_pmu.put_event_constraints)
713                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
714                 }
715         }
716         return num ? -EINVAL : 0;
717 }
718
719 /*
720  * dogrp: true if we must also collect the sibling events (group)
721  * returns the total number of events, or a negative error code
722  */
723 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
724 {
725         struct perf_event *event;
726         int n, max_count;
727
728         max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
729
730         /* current number of events already accepted */
731         n = cpuc->n_events;
732
733         if (is_x86_event(leader)) {
734                 if (n >= max_count)
735                         return -EINVAL;
736                 cpuc->event_list[n] = leader;
737                 n++;
738         }
739         if (!dogrp)
740                 return n;
741
742         list_for_each_entry(event, &leader->sibling_list, group_entry) {
743                 if (!is_x86_event(event) ||
744                     event->state <= PERF_EVENT_STATE_OFF)
745                         continue;
746
747                 if (n >= max_count)
748                         return -EINVAL;
749
750                 cpuc->event_list[n] = event;
751                 n++;
752         }
753         return n;
754 }
755
756 static inline void x86_assign_hw_event(struct perf_event *event,
757                                 struct cpu_hw_events *cpuc, int i)
758 {
759         struct hw_perf_event *hwc = &event->hw;
760
761         hwc->idx = cpuc->assign[i];
762         hwc->last_cpu = smp_processor_id();
763         hwc->last_tag = ++cpuc->tags[i];
764
765         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
766                 hwc->config_base = 0;
767                 hwc->event_base = 0;
768         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
769                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
770                 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
771         } else {
772                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
773                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
774         }
775 }
776
777 static inline int match_prev_assignment(struct hw_perf_event *hwc,
778                                         struct cpu_hw_events *cpuc,
779                                         int i)
780 {
781         return hwc->idx == cpuc->assign[i] &&
782                 hwc->last_cpu == smp_processor_id() &&
783                 hwc->last_tag == cpuc->tags[i];
784 }
785
786 static void x86_pmu_start(struct perf_event *event, int flags);
787
788 static void x86_pmu_enable(struct pmu *pmu)
789 {
790         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
791         struct perf_event *event;
792         struct hw_perf_event *hwc;
793         int i, added = cpuc->n_added;
794
795         if (!x86_pmu_initialized())
796                 return;
797
798         if (cpuc->enabled)
799                 return;
800
801         if (cpuc->n_added) {
802                 int n_running = cpuc->n_events - cpuc->n_added;
803                 /*
804                  * apply assignment obtained either from
805                  * hw_perf_group_sched_in() or x86_pmu_enable()
806                  *
807                  * step1: save events moving to new counters
808                  * step2: reprogram moved events into new counters
809                  */
810                 for (i = 0; i < n_running; i++) {
811                         event = cpuc->event_list[i];
812                         hwc = &event->hw;
813
814                         /*
815                          * we can avoid reprogramming counter if:
816                          * - assigned same counter as last time
817                          * - running on same CPU as last time
818                          * - no other event has used the counter since
819                          */
820                         if (hwc->idx == -1 ||
821                             match_prev_assignment(hwc, cpuc, i))
822                                 continue;
823
824                         /*
825                          * Ensure we don't accidentally enable a stopped
826                          * counter simply because we rescheduled.
827                          */
828                         if (hwc->state & PERF_HES_STOPPED)
829                                 hwc->state |= PERF_HES_ARCH;
830
831                         x86_pmu_stop(event, PERF_EF_UPDATE);
832                 }
833
834                 for (i = 0; i < cpuc->n_events; i++) {
835                         event = cpuc->event_list[i];
836                         hwc = &event->hw;
837
838                         if (!match_prev_assignment(hwc, cpuc, i))
839                                 x86_assign_hw_event(event, cpuc, i);
840                         else if (i < n_running)
841                                 continue;
842
843                         if (hwc->state & PERF_HES_ARCH)
844                                 continue;
845
846                         x86_pmu_start(event, PERF_EF_RELOAD);
847                 }
848                 cpuc->n_added = 0;
849                 perf_events_lapic_init();
850         }
851
852         cpuc->enabled = 1;
853         barrier();
854
855         x86_pmu.enable_all(added);
856 }
857
858 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
859
860 /*
861  * Set the next IRQ period, based on the hwc->period_left value.
862  * To be called with the event disabled in hw:
863  */
864 int x86_perf_event_set_period(struct perf_event *event)
865 {
866         struct hw_perf_event *hwc = &event->hw;
867         s64 left = local64_read(&hwc->period_left);
868         s64 period = hwc->sample_period;
869         int ret = 0, idx = hwc->idx;
870
871         if (idx == X86_PMC_IDX_FIXED_BTS)
872                 return 0;
873
874         /*
875          * If we are way outside a reasonable range then just skip forward:
876          */
877         if (unlikely(left <= -period)) {
878                 left = period;
879                 local64_set(&hwc->period_left, left);
880                 hwc->last_period = period;
881                 ret = 1;
882         }
883
884         if (unlikely(left <= 0)) {
885                 left += period;
886                 local64_set(&hwc->period_left, left);
887                 hwc->last_period = period;
888                 ret = 1;
889         }
890         /*
891          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
892          */
893         if (unlikely(left < 2))
894                 left = 2;
895
896         if (left > x86_pmu.max_period)
897                 left = x86_pmu.max_period;
898
899         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
900
901         /*
902          * The hw event starts counting from this event offset,
903          * mark it to be able to extract future deltas:
904          */
905         local64_set(&hwc->prev_count, (u64)-left);
906
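        /*
         * Program the counter with -left so that it overflows, and raises
         * the PMI, after another 'left' events.
         */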
907         wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
908
909         /*
910          * Due to an erratum on certain CPUs we need a second
911          * write to be sure the register is updated properly
913          */
914         if (x86_pmu.perfctr_second_write) {
915                 wrmsrl(hwc->event_base,
916                         (u64)(-left) & x86_pmu.cntval_mask);
917         }
918
919         perf_event_update_userpage(event);
920
921         return ret;
922 }
923
924 void x86_pmu_enable_event(struct perf_event *event)
925 {
926         if (__this_cpu_read(cpu_hw_events.enabled))
927                 __x86_pmu_enable_event(&event->hw,
928                                        ARCH_PERFMON_EVENTSEL_ENABLE);
929 }
930
931 /*
932  * Add a single event to the PMU.
933  *
934  * The event is added to the group of enabled events
935  * but only if it can be scheduled with the existing events.
936  */
937 static int x86_pmu_add(struct perf_event *event, int flags)
938 {
939         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
940         struct hw_perf_event *hwc;
941         int assign[X86_PMC_IDX_MAX];
942         int n, n0, ret;
943
944         hwc = &event->hw;
945
946         perf_pmu_disable(event->pmu);
947         n0 = cpuc->n_events;
948         ret = n = collect_events(cpuc, event, false);
949         if (ret < 0)
950                 goto out;
951
952         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
953         if (!(flags & PERF_EF_START))
954                 hwc->state |= PERF_HES_ARCH;
955
956         /*
957          * If a group event scheduling transaction was started,
958          * skip the schedulability test here; it will be performed
959          * at commit time (->commit_txn) as a whole
960          */
961         if (cpuc->group_flag & PERF_EVENT_TXN)
962                 goto done_collect;
963
964         ret = x86_pmu.schedule_events(cpuc, n, assign);
965         if (ret)
966                 goto out;
967         /*
968          * Copy the new assignment now that we know it is possible;
969          * it will be used by hw_perf_enable()
970          */
971         memcpy(cpuc->assign, assign, n*sizeof(int));
972
973 done_collect:
974         cpuc->n_events = n;
975         cpuc->n_added += n - n0;
976         cpuc->n_txn += n - n0;
977
978         ret = 0;
979 out:
980         perf_pmu_enable(event->pmu);
981         return ret;
982 }
983
984 static void x86_pmu_start(struct perf_event *event, int flags)
985 {
986         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
987         int idx = event->hw.idx;
988
989         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
990                 return;
991
992         if (WARN_ON_ONCE(idx == -1))
993                 return;
994
995         if (flags & PERF_EF_RELOAD) {
996                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
997                 x86_perf_event_set_period(event);
998         }
999
1000         event->hw.state = 0;
1001
1002         cpuc->events[idx] = event;
1003         __set_bit(idx, cpuc->active_mask);
1004         __set_bit(idx, cpuc->running);
1005         x86_pmu.enable(event);
1006         perf_event_update_userpage(event);
1007 }
1008
1009 void perf_event_print_debug(void)
1010 {
1011         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1012         u64 pebs;
1013         struct cpu_hw_events *cpuc;
1014         unsigned long flags;
1015         int cpu, idx;
1016
1017         if (!x86_pmu.num_counters)
1018                 return;
1019
1020         local_irq_save(flags);
1021
1022         cpu = smp_processor_id();
1023         cpuc = &per_cpu(cpu_hw_events, cpu);
1024
1025         if (x86_pmu.version >= 2) {
1026                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1027                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1028                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1029                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1030                 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1031
1032                 pr_info("\n");
1033                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1034                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1035                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1036                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1037                 pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1038         }
1039         pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1040
1041         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1042                 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1043                 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1044
1045                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1046
1047                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1048                         cpu, idx, pmc_ctrl);
1049                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1050                         cpu, idx, pmc_count);
1051                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1052                         cpu, idx, prev_left);
1053         }
1054         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1055                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1056
1057                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1058                         cpu, idx, pmc_count);
1059         }
1060         local_irq_restore(flags);
1061 }
1062
1063 void x86_pmu_stop(struct perf_event *event, int flags)
1064 {
1065         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1066         struct hw_perf_event *hwc = &event->hw;
1067
1068         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1069                 x86_pmu.disable(event);
1070                 cpuc->events[hwc->idx] = NULL;
1071                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1072                 hwc->state |= PERF_HES_STOPPED;
1073         }
1074
1075         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1076                 /*
1077                  * Drain the remaining delta count out of an event
1078                  * that we are disabling:
1079                  */
1080                 x86_perf_event_update(event);
1081                 hwc->state |= PERF_HES_UPTODATE;
1082         }
1083 }
1084
1085 static void x86_pmu_del(struct perf_event *event, int flags)
1086 {
1087         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1088         int i;
1089
1090         /*
1091          * If we're called during a txn, we don't need to do anything.
1092          * The events never got scheduled and ->cancel_txn will truncate
1093          * the event_list.
1094          */
1095         if (cpuc->group_flag & PERF_EVENT_TXN)
1096                 return;
1097
1098         x86_pmu_stop(event, PERF_EF_UPDATE);
1099
1100         for (i = 0; i < cpuc->n_events; i++) {
1101                 if (event == cpuc->event_list[i]) {
1102
1103                         if (x86_pmu.put_event_constraints)
1104                                 x86_pmu.put_event_constraints(cpuc, event);
1105
1106                         while (++i < cpuc->n_events)
1107                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1108
1109                         --cpuc->n_events;
1110                         break;
1111                 }
1112         }
1113         perf_event_update_userpage(event);
1114 }
1115
1116 int x86_pmu_handle_irq(struct pt_regs *regs)
1117 {
1118         struct perf_sample_data data;
1119         struct cpu_hw_events *cpuc;
1120         struct perf_event *event;
1121         int idx, handled = 0;
1122         u64 val;
1123
1124         perf_sample_data_init(&data, 0);
1125
1126         cpuc = &__get_cpu_var(cpu_hw_events);
1127
1128         /*
1129          * Some chipsets need to unmask the LVTPC in a particular spot
1130          * inside the nmi handler.  As a result, the unmasking was pushed
1131          * into all the nmi handlers.
1132          *
1133          * This generic handler doesn't seem to have any issues with where
1134          * the unmasking occurs, so it was left at the top.
1135          */
1136         apic_write(APIC_LVTPC, APIC_DM_NMI);
1137
1138         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1139                 if (!test_bit(idx, cpuc->active_mask)) {
1140                         /*
1141                          * Though we deactivated the counter, some CPUs
1142                          * might still deliver spurious interrupts that are
1143                          * already in flight. Catch them:
1144                          */
1145                         if (__test_and_clear_bit(idx, cpuc->running))
1146                                 handled++;
1147                         continue;
1148                 }
1149
1150                 event = cpuc->events[idx];
1151
1152                 val = x86_perf_event_update(event);
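                /*
                 * The counter is programmed to count up from a negative
                 * value; if its sign bit is still set it has not crossed
                 * zero, i.e. this counter did not overflow.
                 */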
1153                 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1154                         continue;
1155
1156                 /*
1157                  * event overflow
1158                  */
1159                 handled++;
1160                 data.period     = event->hw.last_period;
1161
1162                 if (!x86_perf_event_set_period(event))
1163                         continue;
1164
1165                 if (perf_event_overflow(event, &data, regs))
1166                         x86_pmu_stop(event, 0);
1167         }
1168
1169         if (handled)
1170                 inc_irq_stat(apic_perf_irqs);
1171
1172         return handled;
1173 }
1174
1175 void perf_events_lapic_init(void)
1176 {
1177         if (!x86_pmu.apic || !x86_pmu_initialized())
1178                 return;
1179
1180         /*
1181          * Always use NMI for PMU
1182          */
1183         apic_write(APIC_LVTPC, APIC_DM_NMI);
1184 }
1185
1186 static int __kprobes
1187 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1188 {
1189         if (!atomic_read(&active_events))
1190                 return NMI_DONE;
1191
1192         return x86_pmu.handle_irq(regs);
1193 }
1194
1195 struct event_constraint emptyconstraint;
1196 struct event_constraint unconstrained;
1197
1198 static int __cpuinit
1199 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1200 {
1201         unsigned int cpu = (long)hcpu;
1202         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1203         int ret = NOTIFY_OK;
1204
1205         switch (action & ~CPU_TASKS_FROZEN) {
1206         case CPU_UP_PREPARE:
1207                 cpuc->kfree_on_online = NULL;
1208                 if (x86_pmu.cpu_prepare)
1209                         ret = x86_pmu.cpu_prepare(cpu);
1210                 break;
1211
1212         case CPU_STARTING:
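                /*
                 * Setting CR4.PCE lets user space read the counters
                 * directly with RDPMC, which the user-space RDPMC fast
                 * path relies on.
                 */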
1213                 set_in_cr4(X86_CR4_PCE);
1214                 if (x86_pmu.cpu_starting)
1215                         x86_pmu.cpu_starting(cpu);
1216                 break;
1217
1218         case CPU_ONLINE:
1219                 kfree(cpuc->kfree_on_online);
1220                 break;
1221
1222         case CPU_DYING:
1223                 if (x86_pmu.cpu_dying)
1224                         x86_pmu.cpu_dying(cpu);
1225                 break;
1226
1227         case CPU_UP_CANCELED:
1228         case CPU_DEAD:
1229                 if (x86_pmu.cpu_dead)
1230                         x86_pmu.cpu_dead(cpu);
1231                 break;
1232
1233         default:
1234                 break;
1235         }
1236
1237         return ret;
1238 }
1239
1240 static void __init pmu_check_apic(void)
1241 {
1242         if (cpu_has_apic)
1243                 return;
1244
1245         x86_pmu.apic = 0;
1246         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1247         pr_info("no hardware sampling interrupt available.\n");
1248 }
1249
1250 static int __init init_hw_perf_events(void)
1251 {
1252         struct x86_pmu_quirk *quirk;
1253         struct event_constraint *c;
1254         int err;
1255
1256         pr_info("Performance Events: ");
1257
1258         switch (boot_cpu_data.x86_vendor) {
1259         case X86_VENDOR_INTEL:
1260                 err = intel_pmu_init();
1261                 break;
1262         case X86_VENDOR_AMD:
1263                 err = amd_pmu_init();
1264                 break;
1265         default:
1266                 return 0;
1267         }
1268         if (err != 0) {
1269                 pr_cont("no PMU driver, software events only.\n");
1270                 return 0;
1271         }
1272
1273         pmu_check_apic();
1274
1275         /* sanity check that the hardware exists or is emulated */
1276         if (!check_hw_exists())
1277                 return 0;
1278
1279         pr_cont("%s PMU driver.\n", x86_pmu.name);
1280
1281         for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1282                 quirk->func();
1283
1284         if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1285                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1286                      x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1287                 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1288         }
1289         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1290
1291         if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1292                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1293                      x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1294                 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1295         }
1296
1297         x86_pmu.intel_ctrl |=
1298                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1299
1300         perf_events_lapic_init();
1301         register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1302
1303         unconstrained = (struct event_constraint)
1304                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1305                                    0, x86_pmu.num_counters, 0);
1306
1307         if (x86_pmu.event_constraints) {
1308                 /*
1309                  * event on fixed counter2 (REF_CYCLES) only works on this
1310                  * counter, so do not extend mask to generic counters
1311                  */
1312                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1313                         if (c->cmask != X86_RAW_EVENT_MASK
1314                             || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
1315                                 continue;
1316                         }
1317
1318                         c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1319                         c->weight += x86_pmu.num_counters;
1320                 }
1321         }
1322
1323         pr_info("... version:                %d\n",     x86_pmu.version);
1324         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1325         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1326         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1327         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1328         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1329         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1330
1331         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1332         perf_cpu_notifier(x86_pmu_notifier);
1333
1334         return 0;
1335 }
1336 early_initcall(init_hw_perf_events);
1337
1338 static inline void x86_pmu_read(struct perf_event *event)
1339 {
1340         x86_perf_event_update(event);
1341 }
1342
1343 /*
1344  * Start group events scheduling transaction
1345  * Set the flag to make pmu::enable() not perform the
1346  * schedulability test; it will be performed at commit time
1347  */
1348 static void x86_pmu_start_txn(struct pmu *pmu)
1349 {
1350         perf_pmu_disable(pmu);
1351         __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1352         __this_cpu_write(cpu_hw_events.n_txn, 0);
1353 }
1354
1355 /*
1356  * Stop group events scheduling transaction
1357  * Clear the flag and pmu::enable() will perform the
1358  * schedulability test.
1359  */
1360 static void x86_pmu_cancel_txn(struct pmu *pmu)
1361 {
1362         __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
1363         /*
1364          * Truncate the collected events.
1365          */
1366         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1367         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1368         perf_pmu_enable(pmu);
1369 }
1370
1371 /*
1372  * Commit group events scheduling transaction
1373  * Perform the group schedulability test as a whole
1374  * Return 0 if success
1375  */
1376 static int x86_pmu_commit_txn(struct pmu *pmu)
1377 {
1378         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1379         int assign[X86_PMC_IDX_MAX];
1380         int n, ret;
1381
1382         n = cpuc->n_events;
1383
1384         if (!x86_pmu_initialized())
1385                 return -EAGAIN;
1386
1387         ret = x86_pmu.schedule_events(cpuc, n, assign);
1388         if (ret)
1389                 return ret;
1390
1391         /*
1392          * Copy the new assignment now that we know it is possible;
1393          * it will be used by hw_perf_enable()
1394          */
1395         memcpy(cpuc->assign, assign, n*sizeof(int));
1396
1397         cpuc->group_flag &= ~PERF_EVENT_TXN;
1398         perf_pmu_enable(pmu);
1399         return 0;
1400 }
1401 /*
1402  * a fake_cpuc is used to validate event groups. Due to
1403  * the extra reg logic, we need to also allocate a fake
1404  * per_core and per_cpu structure. Otherwise, group events
1405  * using extra reg may conflict without the kernel being
1406  * able to catch this when the last event gets added to
1407  * the group.
1408  */
1409 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1410 {
1411         kfree(cpuc->shared_regs);
1412         kfree(cpuc);
1413 }
1414
1415 static struct cpu_hw_events *allocate_fake_cpuc(void)
1416 {
1417         struct cpu_hw_events *cpuc;
1418         int cpu = raw_smp_processor_id();
1419
1420         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1421         if (!cpuc)
1422                 return ERR_PTR(-ENOMEM);
1423
1424         /* only needed, if we have extra_regs */
1425         if (x86_pmu.extra_regs) {
1426                 cpuc->shared_regs = allocate_shared_regs(cpu);
1427                 if (!cpuc->shared_regs)
1428                         goto error;
1429         }
1430         return cpuc;
1431 error:
1432         free_fake_cpuc(cpuc);
1433         return ERR_PTR(-ENOMEM);
1434 }
1435
1436 /*
1437  * validate that we can schedule this event
1438  */
1439 static int validate_event(struct perf_event *event)
1440 {
1441         struct cpu_hw_events *fake_cpuc;
1442         struct event_constraint *c;
1443         int ret = 0;
1444
1445         fake_cpuc = allocate_fake_cpuc();
1446         if (IS_ERR(fake_cpuc))
1447                 return PTR_ERR(fake_cpuc);
1448
1449         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1450
1451         if (!c || !c->weight)
1452                 ret = -EINVAL;
1453
1454         if (x86_pmu.put_event_constraints)
1455                 x86_pmu.put_event_constraints(fake_cpuc, event);
1456
1457         free_fake_cpuc(fake_cpuc);
1458
1459         return ret;
1460 }
1461
1462 /*
1463  * validate a single event group
1464  *
1465  * validation includes:
1466  *      - check events are compatible with each other
1467  *      - events do not compete for the same counter
1468  *      - number of events <= number of counters
1469  *
1470  * validation ensures the group can be loaded onto the
1471  * PMU if it was the only group available.
1472  */
1473 static int validate_group(struct perf_event *event)
1474 {
1475         struct perf_event *leader = event->group_leader;
1476         struct cpu_hw_events *fake_cpuc;
1477         int ret = -EINVAL, n;
1478
1479         fake_cpuc = allocate_fake_cpuc();
1480         if (IS_ERR(fake_cpuc))
1481                 return PTR_ERR(fake_cpuc);
1482         /*
1483          * The event is not yet connected with its
1484          * siblings; therefore we must first collect the
1485          * existing siblings, then add the new event
1486          * before we can simulate the scheduling
1487          */
1488         n = collect_events(fake_cpuc, leader, true);
1489         if (n < 0)
1490                 goto out;
1491
1492         fake_cpuc->n_events = n;
1493         n = collect_events(fake_cpuc, event, false);
1494         if (n < 0)
1495                 goto out;
1496
1497         fake_cpuc->n_events = n;
1498
1499         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1500
1501 out:
1502         free_fake_cpuc(fake_cpuc);
1503         return ret;
1504 }
1505
1506 static int x86_pmu_event_init(struct perf_event *event)
1507 {
1508         struct pmu *tmp;
1509         int err;
1510
1511         switch (event->attr.type) {
1512         case PERF_TYPE_RAW:
1513         case PERF_TYPE_HARDWARE:
1514         case PERF_TYPE_HW_CACHE:
1515                 break;
1516
1517         default:
1518                 return -ENOENT;
1519         }
1520
1521         err = __x86_pmu_event_init(event);
1522         if (!err) {
1523                 /*
1524                  * we temporarily connect the event to its pmu
1525                  * such that validate_group() can classify
1526                  * it as an x86 event using is_x86_event()
1527                  */
1528                 tmp = event->pmu;
1529                 event->pmu = &pmu;
1530
1531                 if (event->group_leader != event)
1532                         err = validate_group(event);
1533                 else
1534                         err = validate_event(event);
1535
1536                 event->pmu = tmp;
1537         }
1538         if (err) {
1539                 if (event->destroy)
1540                         event->destroy(event);
1541         }
1542
1543         return err;
1544 }
1545
1546 static int x86_pmu_event_idx(struct perf_event *event)
1547 {
1548         int idx = event->hw.idx;
1549
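        /*
         * The value returned here is published to user space via
         * perf_event_mmap_page::index: 0 means "no counter", otherwise
         * the counter can be read with RDPMC(index - 1).  Fixed counters
         * are flagged with bit 30, matching the RDPMC fixed-counter
         * select bit.
         *
         * A rough sketch of the user-space read loop (see the
         * perf_event_mmap_page documentation for the authoritative
         * version):
         *
         *      do {
         *              seq = pc->lock;
         *              barrier();
         *              idx = pc->index;
         *              count = pc->offset;
         *              if (idx)
         *                      count += rdpmc(idx - 1);
         *              barrier();
         *      } while (pc->lock != seq);
         */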
1550         if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
1551                 idx -= X86_PMC_IDX_FIXED;
1552                 idx |= 1 << 30;
1553         }
1554
1555         return idx + 1;
1556 }
1557
1558 static struct pmu pmu = {
1559         .pmu_enable     = x86_pmu_enable,
1560         .pmu_disable    = x86_pmu_disable,
1561
1562         .event_init     = x86_pmu_event_init,
1563
1564         .add            = x86_pmu_add,
1565         .del            = x86_pmu_del,
1566         .start          = x86_pmu_start,
1567         .stop           = x86_pmu_stop,
1568         .read           = x86_pmu_read,
1569
1570         .start_txn      = x86_pmu_start_txn,
1571         .cancel_txn     = x86_pmu_cancel_txn,
1572         .commit_txn     = x86_pmu_commit_txn,
1573
1574         .event_idx      = x86_pmu_event_idx,
1575 };
1576
1577 /*
1578  * callchain support
1579  */
1580
1581 static int backtrace_stack(void *data, char *name)
1582 {
1583         return 0;
1584 }
1585
1586 static void backtrace_address(void *data, unsigned long addr, int reliable)
1587 {
1588         struct perf_callchain_entry *entry = data;
1589
1590         perf_callchain_store(entry, addr);
1591 }
1592
1593 static const struct stacktrace_ops backtrace_ops = {
1594         .stack                  = backtrace_stack,
1595         .address                = backtrace_address,
1596         .walk_stack             = print_context_stack_bp,
1597 };
1598
1599 void
1600 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1601 {
1602         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1603                 /* TODO: We don't support guest os callchain now */
1604                 return;
1605         }
1606
1607         perf_callchain_store(entry, regs->ip);
1608
1609         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1610 }
1611
1612 #ifdef CONFIG_COMPAT
1613 static inline int
1614 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1615 {
1616         /* 32-bit process in 64-bit kernel. */
1617         struct stack_frame_ia32 frame;
1618         const void __user *fp;
1619
1620         if (!test_thread_flag(TIF_IA32))
1621                 return 0;
1622
1623         fp = compat_ptr(regs->bp);
1624         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1625                 unsigned long bytes;
1626                 frame.next_frame     = 0;
1627                 frame.return_address = 0;
1628
1629                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1630                 if (bytes != sizeof(frame))
1631                         break;
1632
1633                 if (fp < compat_ptr(regs->sp))
1634                         break;
1635
1636                 perf_callchain_store(entry, frame.return_address);
1637                 fp = compat_ptr(frame.next_frame);
1638         }
1639         return 1;
1640 }
1641 #else
1642 static inline int
1643 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1644 {
1645     return 0;
1646 }
1647 #endif
1648
1649 void
1650 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1651 {
1652         struct stack_frame frame;
1653         const void __user *fp;
1654
1655         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1656                 /* TODO: We don't support guest os callchain now */
1657                 return;
1658         }
1659
1660         fp = (void __user *)regs->bp;
1661
1662         perf_callchain_store(entry, regs->ip);
1663
1664         if (!current->mm)
1665                 return;
1666
1667         if (perf_callchain_user32(regs, entry))
1668                 return;
1669
1670         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1671                 unsigned long bytes;
1672                 frame.next_frame             = NULL;
1673                 frame.return_address = 0;
1674
1675                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1676                 if (bytes != sizeof(frame))
1677                         break;
1678
1679                 if ((unsigned long)fp < regs->sp)
1680                         break;
1681
1682                 perf_callchain_store(entry, frame.return_address);
1683                 fp = frame.next_frame;
1684         }
1685 }
1686
1687 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1688 {
1689         unsigned long ip;
1690
1691         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1692                 ip = perf_guest_cbs->get_guest_ip();
1693         else
1694                 ip = instruction_pointer(regs);
1695
1696         return ip;
1697 }
1698
1699 unsigned long perf_misc_flags(struct pt_regs *regs)
1700 {
1701         int misc = 0;
1702
1703         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1704                 if (perf_guest_cbs->is_user_mode())
1705                         misc |= PERF_RECORD_MISC_GUEST_USER;
1706                 else
1707                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1708         } else {
1709                 if (user_mode(regs))
1710                         misc |= PERF_RECORD_MISC_USER;
1711                 else
1712                         misc |= PERF_RECORD_MISC_KERNEL;
1713         }
1714
1715         if (regs->flags & PERF_EFLAGS_EXACT)
1716                 misc |= PERF_RECORD_MISC_EXACT_IP;
1717
1718         return misc;
1719 }
1720
1721 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
1722 {
1723         cap->version            = x86_pmu.version;
1724         cap->num_counters_gp    = x86_pmu.num_counters;
1725         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
1726         cap->bit_width_gp       = x86_pmu.cntval_bits;
1727         cap->bit_width_fixed    = x86_pmu.cntval_bits;
1728         cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
1729         cap->events_mask_len    = x86_pmu.events_mask_len;
1730 }
1731 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);