perf tools: Add x86 RDPMC, RDTSC test
tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"

#include <sys/mman.h>

static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
        bool *visited = symbol__priv(sym);
        *visited = true;
        return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
        int err = -1;
        struct rb_node *nd;
        struct symbol *sym;
        struct map *kallsyms_map, *vmlinux_map;
        struct machine kallsyms, vmlinux;
        enum map_type type = MAP__FUNCTION;
        long page_size = sysconf(_SC_PAGE_SIZE);
        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

        /*
         * Step 1:
         *
         * Init the machines that will hold kernel, modules obtained from
         * both vmlinux + .ko files and from /proc/kallsyms split by modules.
         */
        machine__init(&kallsyms, "", HOST_KERNEL_ID);
        machine__init(&vmlinux, "", HOST_KERNEL_ID);

        /*
         * Step 2:
         *
         * Create the kernel maps for kallsyms and the DSO where we will then
         * load /proc/kallsyms. Also create the modules maps from /proc/modules
         * and find the .ko files that match them in /lib/modules/`uname -r`/.
         */
        if (machine__create_kernel_maps(&kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps ");
                return -1;
        }

        /*
         * Step 3:
         *
         * Load and split /proc/kallsyms into multiple maps, one per module.
         */
        if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
                pr_debug("dso__load_kallsyms ");
                goto out;
        }

        /*
         * Step 4:
         *
         * kallsyms will be internally sorted, on demand, by name so that we
         * can find the reference relocation symbol, i.e. the symbol we will
         * use to see if the running kernel was relocated, by checking if it
         * has the same value in the vmlinux file we load.
         */
        kallsyms_map = machine__kernel_map(&kallsyms, type);

        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
        if (sym == NULL) {
                pr_debug("dso__find_symbol_by_name ");
                goto out;
        }

        ref_reloc_sym.addr = sym->start;

        /*
         * Step 5:
         *
         * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
         */
        if (machine__create_kernel_maps(&vmlinux) < 0) {
                pr_debug("machine__create_kernel_maps ");
                goto out;
        }

        vmlinux_map = machine__kernel_map(&vmlinux, type);
        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

        /*
         * Step 6:
         *
         * Locate a vmlinux file in the vmlinux path that has a buildid that
         * matches the one of the running kernel.
         *
         * While doing that, look for the ref reloc symbol; if we find it
         * we'll have its ref_reloc_symbol.unrelocated_addr and then
         * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
         * to fixup the symbols.
         */
        if (machine__load_vmlinux_path(&vmlinux, type,
                                       vmlinux_matches_kallsyms_filter) <= 0) {
                pr_debug("machine__load_vmlinux_path ");
                goto out;
        }

        err = 0;
        /*
         * Step 7:
         *
         * Now look at the symbols in the vmlinux DSO and check if we find all
         * of them in the kallsyms dso. For the ones that are in both, check
         * their names and end addresses too.
         */
        for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
                struct symbol *pair, *first_pair;
                bool backwards = true;

                sym  = rb_entry(nd, struct symbol, rb_node);

                if (sym->start == sym->end)
                        continue;

                first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
                pair = first_pair;

                if (pair && pair->start == sym->start) {
next_pair:
                        if (strcmp(sym->name, pair->name) == 0) {
                                /*
                                 * kallsyms doesn't have the symbol end, so we
                                 * set it using the next symbol start - 1; in
                                 * some cases we get this wrong by up to a
                                 * page. trace_kmalloc, when this code was
                                 * being developed, was one such example, 2106
                                 * bytes off the real size. More than that and
                                 * we _really_ have a problem.
                                 */
                                s64 skew = sym->end - pair->end;
                                if (llabs(skew) < page_size)
                                        continue;

                                pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
detour:
                                nnd = backwards ? rb_prev(&pair->rb_node) :
                                                  rb_next(&pair->rb_node);
                                if (nnd) {
                                        struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

                                        if (next->start == sym->start) {
                                                pair = next;
                                                goto next_pair;
                                        }
                                }

                                if (backwards) {
                                        backwards = false;
                                        pair = first_pair;
                                        goto detour;
                                }

                                pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
                        pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

                err = -1;
        }

        if (!verbose)
                goto out;

        pr_info("Maps only in vmlinux:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
                /*
                 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
                 * while the kernel map will have the path of the vmlinux file
                 * being used, so use the short name, less descriptive but the
                 * same ("[kernel]" in both cases).
                 */
                pair = map_groups__find_by_name(&kallsyms.kmaps, type,
                                                (pos->dso->kernel ?
                                                        pos->dso->short_name :
                                                        pos->dso->name));
                if (pair)
                        pair->priv = 1;
                else
                        map__fprintf(pos, stderr);
        }

        pr_info("Maps in vmlinux with a different name in kallsyms:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

                pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
                if (pair == NULL || pair->priv)
                        continue;

                if (pair->start == pos->start) {
                        pair->priv = 1;
                        pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
                                pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                }
        }

        pr_info("Maps only in kallsyms:\n");

        for (nd = rb_first(&kallsyms.kmaps.maps[type]);
             nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (!pos->priv)
                        map__fprintf(pos, stderr);
        }
out:
        return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

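/*
 * Read the tracepoint id for @evname out of debugfs, e.g.
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id for
 * "sys_enter_open". Returns the id, or -1 if it couldn't be read.
 */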
static int trace_event__id(const char *evname)
{
        char *filename;
        int err = -1, fd;

        if (asprintf(&filename,
                     "%s/syscalls/%s/id",
                     tracing_events_path, evname) < 0)
                return -1;

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                if (read(fd, id, sizeof(id)) > 0)
                        err = atoi(id);
                close(fd);
        }

        free(filename);
        return err;
}

static int test__open_syscall_event(void)
{
        int err = -1, fd;
        struct thread_map *threads;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (i = 0; i < nr_open_calls; ++i) {
                fd = open("/etc/passwd", O_RDONLY);
                close(fd);
        }

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
                pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }

        if (evsel->counts->cpu[0].val != nr_open_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }

        err = 0;
out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

#include <sched.h>

static int test__open_syscall_event_on_all_cpus(void)
{
        int err = -1, fd, cpu;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        cpu_set_t cpu_set;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;
                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
                                 strerror(errno));
                        goto out_close_fd;
                }
                for (i = 0; i < ncalls; ++i) {
                        fd = open("/etc/passwd", O_RDONLY);
                        close(fd);
                }
                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts, because if
         * we use the auto allocation it will allocate just for 1 cpu,
         * as we start from cpu 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        err = 0;

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }

                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
                        err = -1;
                }
        }

out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

/*
 * This test will generate random numbers of calls to some getpid-like
 * syscalls, then establish an mmap for a group of events that are created
 * to monitor the syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
        int err = -1;
        union perf_event *event;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evlist *evlist;
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_TRACEPOINT,
                .read_format    = PERF_FORMAT_ID,
                .sample_type    = PERF_SAMPLE_ID,
                .watermark      = 0,
        };
        cpu_set_t cpu_set;
        const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
                                        "getpgid", };
        pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
                                      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
        int ids[nsyscalls];
        unsigned int nr_events[nsyscalls],
                     expected_nr_events[nsyscalls], i, j;
        struct perf_evsel *evsels[nsyscalls], *evsel;
        int sample_size = __perf_evsel__sample_size(attr.sample_type);

        for (i = 0; i < nsyscalls; ++i) {
                char name[64];

                snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
                ids[i] = trace_event__id(name);
                if (ids[i] < 0) {
                        pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
                        return -1;
                }
                nr_events[i] = 0;
                expected_nr_events[i] = random() % 257;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_free_threads;
        }

        CPU_ZERO(&cpu_set);
        CPU_SET(cpus->map[0], &cpu_set);
        if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                         cpus->map[0], strerror(errno));
                goto out_free_cpus;
        }

        evlist = perf_evlist__new(cpus, threads);
        if (evlist == NULL) {
                pr_debug("perf_evlist__new\n");
                goto out_free_cpus;
        }

        /* anonymous union fields, can't be initialized above */
        attr.wakeup_events = 1;
        attr.sample_period = 1;

        for (i = 0; i < nsyscalls; ++i) {
                attr.config = ids[i];
                evsels[i] = perf_evsel__new(&attr, i);
                if (evsels[i] == NULL) {
                        pr_debug("perf_evsel__new\n");
                        goto out_free_evlist;
                }

                perf_evlist__add(evlist, evsels[i]);

                if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                                 strerror(errno));
                        goto out_close_fd;
                }
        }

        if (perf_evlist__mmap(evlist, 128, true) < 0) {
                pr_debug("failed to mmap events: %d (%s)\n", errno,
                         strerror(errno));
                goto out_close_fd;
        }

        for (i = 0; i < nsyscalls; ++i)
                for (j = 0; j < expected_nr_events[i]; ++j) {
                        int foo = syscalls[i]();
                        ++foo;
                }

        while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
                struct perf_sample sample;

                if (event->header.type != PERF_RECORD_SAMPLE) {
                        pr_debug("unexpected %s event\n",
                                 perf_event__name(event->header.type));
                        goto out_munmap;
                }

                err = perf_event__parse_sample(event, attr.sample_type, sample_size,
                                               false, &sample, false);
                if (err) {
                        pr_err("Can't parse sample, err = %d\n", err);
                        goto out_munmap;
                }

                evsel = perf_evlist__id2evsel(evlist, sample.id);
                if (evsel == NULL) {
                        pr_debug("event with id %" PRIu64
                                 " doesn't map to an evsel\n", sample.id);
                        goto out_munmap;
                }
                nr_events[evsel->idx]++;
        }

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
                                 event_name(evsel), nr_events[evsel->idx]);
                        goto out_munmap;
                }
        }

        err = 0;
out_munmap:
        perf_evlist__munmap(evlist);
out_close_fd:
        for (i = 0; i < nsyscalls; ++i)
                perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
        perf_evlist__delete(evlist);
out_free_cpus:
        cpu_map__delete(cpus);
out_free_threads:
        thread_map__delete(threads);
        return err;
#undef nsyscalls
}

#define TEST_ASSERT_VAL(text, cond) \
do { \
        if (!(cond)) { \
                pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
                return -1; \
        } \
} while (0)

static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
                (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
                evsel->attr.sample_type);
        TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
        return 0;
}

static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

        list_for_each_entry(evsel, &evlist->entries, node) {
                TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_TRACEPOINT == evsel->attr.type);
                TEST_ASSERT_VAL("wrong sample_type",
                        (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
                        == evsel->attr.sample_type);
                TEST_ASSERT_VAL("wrong sample_period",
                        1 == evsel->attr.sample_period);
        }
        return 0;
}

static int test__checkevent_raw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_numeric(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
        return 0;
}

static int test__checkevent_genhw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
        return 0;
}

static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
                                         evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
                                        evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_X == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_R == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_W == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

        return test__checkevent_tracepoint(evlist);
}

static int
test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

        list_for_each_entry(evsel, &evlist->entries, node) {
                TEST_ASSERT_VAL("wrong exclude_user",
                                !evsel->attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel",
                                evsel->attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
                TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        }

        return test__checkevent_tracepoint_multi(evlist);
}

static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

        return test__checkevent_raw(evlist);
}

static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

        return test__checkevent_numeric(evlist);
}

static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

        return test__checkevent_symbolic_name(evlist);
}

static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

        return test__checkevent_symbolic_alias(evlist);
}

static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

        return test__checkevent_genhw(evlist);
}

static struct test__event_st {
        const char *name;
        __u32 type;
        int (*check)(struct perf_evlist *evlist);
} test__events[] = {
        {
                .name  = "syscalls:sys_enter_open",
                .check = test__checkevent_tracepoint,
        },
        {
                .name  = "syscalls:*",
                .check = test__checkevent_tracepoint_multi,
        },
        {
                .name  = "r1",
                .check = test__checkevent_raw,
        },
        {
                .name  = "1:1",
                .check = test__checkevent_numeric,
        },
        {
                .name  = "instructions",
                .check = test__checkevent_symbolic_name,
        },
        {
                .name  = "faults",
                .check = test__checkevent_symbolic_alias,
        },
        {
                .name  = "L1-dcache-load-miss",
                .check = test__checkevent_genhw,
        },
        {
                .name  = "mem:0",
                .check = test__checkevent_breakpoint,
        },
        {
                .name  = "mem:0:x",
                .check = test__checkevent_breakpoint_x,
        },
        {
                .name  = "mem:0:r",
                .check = test__checkevent_breakpoint_r,
        },
        {
                .name  = "mem:0:w",
                .check = test__checkevent_breakpoint_w,
        },
        {
                .name  = "syscalls:sys_enter_open:k",
                .check = test__checkevent_tracepoint_modifier,
        },
        {
                .name  = "syscalls:*:u",
                .check = test__checkevent_tracepoint_multi_modifier,
        },
        {
                .name  = "r1:kp",
                .check = test__checkevent_raw_modifier,
        },
        {
                .name  = "1:1:hp",
                .check = test__checkevent_numeric_modifier,
        },
        {
                .name  = "instructions:h",
                .check = test__checkevent_symbolic_name_modifier,
        },
        {
                .name  = "faults:u",
                .check = test__checkevent_symbolic_alias_modifier,
        },
        {
                .name  = "L1-dcache-load-miss:kp",
                .check = test__checkevent_genhw_modifier,
        },
};

#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))

static int test__parse_events(void)
{
        struct perf_evlist *evlist;
        u_int i;
        int ret = 0;

        for (i = 0; i < TEST__EVENTS_CNT; i++) {
                struct test__event_st *e = &test__events[i];

                evlist = perf_evlist__new(NULL, NULL);
                if (evlist == NULL)
                        break;

                ret = parse_events(evlist, e->name, 0);
                if (ret) {
                        pr_debug("failed to parse event '%s', err %d\n",
                                 e->name, ret);
                        break;
                }

                ret = e->check(evlist);
                if (ret)
                        break;

                perf_evlist__delete(evlist);
        }

        return ret;
}

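/*
 * Find the first CPU in @pid's affinity mask and shrink the mask to
 * contain just that CPU, so the caller can pin the workload to it. The
 * mask is CPU_ALLOC()ed, growing on EINVAL for systems with more than
 * 1024 CPUs; on success the caller owns *maskp.
 */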
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
                                         size_t *sizep)
{
        cpu_set_t *mask;
        size_t size;
        int i, cpu = -1, nrcpus = 1024;
realloc:
        mask = CPU_ALLOC(nrcpus);
        size = CPU_ALLOC_SIZE(nrcpus);
        CPU_ZERO_S(size, mask);

        if (sched_getaffinity(pid, size, mask) == -1) {
                CPU_FREE(mask);
                if (errno == EINVAL && nrcpus < (1024 << 8)) {
                        nrcpus = nrcpus << 2;
                        goto realloc;
                }
                perror("sched_getaffinity");
                return -1;
        }

        for (i = 0; i < nrcpus; i++) {
                if (CPU_ISSET_S(i, size, mask)) {
                        if (cpu == -1) {
                                cpu = i;
                                *maskp = mask;
                                *sizep = size;
                        } else
                                CPU_CLR_S(i, size, mask);
                }
        }

        if (cpu == -1)
                CPU_FREE(mask);

        return cpu;
}

static int test__PERF_RECORD(void)
{
        struct perf_record_opts opts = {
                .target_pid = -1,
                .target_tid = -1,
                .no_delay   = true,
                .freq       = 10,
                .mmap_pages = 256,
                .sample_id_all_avail = true,
        };
        cpu_set_t *cpu_mask = NULL;
        size_t cpu_mask_size = 0;
        struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
        struct perf_evsel *evsel;
        struct perf_sample sample;
        const char *cmd = "sleep";
        const char *argv[] = { cmd, "1", NULL, };
        char *bname;
        u64 sample_type, prev_time = 0;
        bool found_cmd_mmap = false,
             found_libc_mmap = false,
             found_vdso_mmap = false,
             found_ld_mmap = false;
        int err = -1, errs = 0, i, wakeups = 0, sample_size;
        u32 cpu;
        int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

        if (evlist == NULL || argv == NULL) {
                pr_debug("Not enough memory to create evlist\n");
                goto out;
        }

        /*
         * We need at least one evsel in the evlist, use the default
         * one: "cycles".
         */
        err = perf_evlist__add_default(evlist);
        if (err < 0) {
                pr_debug("Not enough memory to create evsel\n");
                goto out_delete_evlist;
        }

        /*
         * Create maps of threads and cpus to monitor. In this case
         * we start with all threads and cpus (-1, -1) but then in
         * perf_evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
        err = perf_evlist__create_maps(evlist, opts.target_pid,
                                       opts.target_tid, opts.cpu_list);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
        }

        /*
         * Prepare the workload in argv[] to run, it'll fork it, and then wait
         * for perf_evlist__start_workload() to exec it. This is done this way
         * so that we have time to open the evlist (calling sys_perf_event_open
         * on all the fds) and then mmap them.
         */
        err = perf_evlist__prepare_workload(evlist, &opts, argv);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
                goto out_delete_evlist;
        }

        /*
         * Config the evsels, setting attr->comm on the first one, etc.
         */
        evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
        evsel->attr.sample_type |= PERF_SAMPLE_CPU;
        evsel->attr.sample_type |= PERF_SAMPLE_TID;
        evsel->attr.sample_type |= PERF_SAMPLE_TIME;
        perf_evlist__config_attrs(evlist, &opts);

        err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
                                            &cpu_mask_size);
        if (err < 0) {
                pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        cpu = err;

        /*
         * So that we can check perf_sample.cpu on all the samples.
         */
        if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
                pr_debug("sched_setaffinity: %s\n", strerror(errno));
                goto out_free_cpu_mask;
        }

        /*
         * Call sys_perf_event_open on all the fds on all the evsels,
         * grouping them if asked to.
         */
        err = perf_evlist__open(evlist, opts.group);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        /*
         * mmap the first fd on a given CPU and ask for events for the other
         * fds in the same CPU to be injected in the same mmap ring buffer
         * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
         */
        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        /*
         * We'll need these two to parse the PERF_SAMPLE_* fields in each
         * event.
         */
        sample_type = perf_evlist__sample_type(evlist);
        sample_size = __perf_evsel__sample_size(sample_type);

        /*
         * Now that all is properly set up, enable the events, they will
         * count just on workload.pid, which will start...
         */
        perf_evlist__enable(evlist);

        /*
         * Now!
         */
        perf_evlist__start_workload(evlist);

        while (1) {
                int before = total_events;

                for (i = 0; i < evlist->nr_mmaps; i++) {
                        union perf_event *event;

                        while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                                const u32 type = event->header.type;
                                const char *name = perf_event__name(type);

                                ++total_events;
                                if (type < PERF_RECORD_MAX)
                                        nr_events[type]++;

                                err = perf_event__parse_sample(event, sample_type,
                                                               sample_size, true,
                                                               &sample, false);
                                if (err < 0) {
                                        if (verbose)
                                                perf_event__fprintf(event, stderr);
                                        pr_debug("Couldn't parse sample\n");
                                        goto out_err;
                                }

                                if (verbose) {
                                        pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
                                        perf_event__fprintf(event, stderr);
                                }

                                if (prev_time > sample.time) {
                                        pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
                                                 name, prev_time, sample.time);
                                        ++errs;
                                }

                                prev_time = sample.time;

                                if (sample.cpu != cpu) {
                                        pr_debug("%s with unexpected cpu, expected %d, got %d\n",
                                                 name, cpu, sample.cpu);
                                        ++errs;
                                }

                                if ((pid_t)sample.pid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected pid, expected %d, got %d\n",
                                                 name, evlist->workload.pid, sample.pid);
                                        ++errs;
                                }

                                if ((pid_t)sample.tid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected tid, expected %d, got %d\n",
                                                 name, evlist->workload.pid, sample.tid);
                                        ++errs;
                                }

                                if ((type == PERF_RECORD_COMM ||
                                     type == PERF_RECORD_MMAP ||
                                     type == PERF_RECORD_FORK ||
                                     type == PERF_RECORD_EXIT) &&
                                     (pid_t)event->comm.pid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected pid/tid\n", name);
                                        ++errs;
                                }

                                if ((type == PERF_RECORD_COMM ||
                                     type == PERF_RECORD_MMAP) &&
                                     event->comm.pid != event->comm.tid) {
                                        pr_debug("%s with different pid/tid!\n", name);
                                        ++errs;
                                }

                                switch (type) {
                                case PERF_RECORD_COMM:
                                        if (strcmp(event->comm.comm, cmd)) {
                                                pr_debug("%s with unexpected comm!\n", name);
                                                ++errs;
                                        }
                                        break;
                                case PERF_RECORD_EXIT:
                                        goto found_exit;
                                case PERF_RECORD_MMAP:
                                        bname = strrchr(event->mmap.filename, '/');
                                        if (bname != NULL) {
                                                if (!found_cmd_mmap)
                                                        found_cmd_mmap = !strcmp(bname + 1, cmd);
                                                if (!found_libc_mmap)
                                                        found_libc_mmap = !strncmp(bname + 1, "libc", 4);
                                                if (!found_ld_mmap)
                                                        found_ld_mmap = !strncmp(bname + 1, "ld", 2);
                                        } else if (!found_vdso_mmap)
                                                found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
                                        break;

                                case PERF_RECORD_SAMPLE:
                                        /* Just ignore samples for now */
                                        break;
                                default:
                                        pr_debug("Unexpected perf_event->header.type %d!\n",
                                                 type);
                                        ++errs;
                                }
                        }
                }

                /*
                 * We don't use poll here because, at least as of 3.1, the
                 * PERF_RECORD_{!SAMPLE} events don't honour
                 * perf_event_attr.wakeup_events; just PERF_RECORD_SAMPLE does.
                 */
                if (total_events == before && false)
                        poll(evlist->pollfd, evlist->nr_fds, -1);

                sleep(1);
                if (++wakeups > 5) {
                        pr_debug("No PERF_RECORD_EXIT event!\n");
                        break;
                }
        }

found_exit:
        if (nr_events[PERF_RECORD_COMM] > 1) {
                pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
                ++errs;
        }

        if (nr_events[PERF_RECORD_COMM] == 0) {
                pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
                ++errs;
        }

        if (!found_cmd_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
                ++errs;
        }

        if (!found_libc_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
                ++errs;
        }

        if (!found_ld_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
                ++errs;
        }

        if (!found_vdso_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
                ++errs;
        }
out_err:
        perf_evlist__munmap(evlist);
out_free_cpu_mask:
        CPU_FREE(cpu_mask);
out_delete_evlist:
        perf_evlist__delete(evlist);
out:
        return (err < 0 || errs > 0) ? -1 : 0;
}

#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

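/*
 * RDPMC reads the hardware performance counter selected by ECX into
 * EDX:EAX. In user mode it faults with a #GP unless the kernel has set
 * CR4.PCE, which is why the test below runs in a forked child with a
 * SIGSEGV handler installed.
 */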
static u64 rdpmc(unsigned int counter)
{
        unsigned int low, high;

        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

        return low | ((u64)high) << 32;
}

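/* RDTSC returns the 64-bit time stamp counter in EDX:EAX. */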
static u64 rdtsc(void)
{
        unsigned int low, high;

        asm volatile("rdtsc" : "=a" (low), "=d" (high));

        return low | ((u64)high) << 32;
}

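/*
 * Self-monitoring read of the counter: combine the kernel-maintained
 * base count in the perf_event_mmap_page with a userspace RDPMC of the
 * live hardware counter, retrying until the ->lock sequence count is
 * stable so that a consistent snapshot is seen. If the event was not
 * scheduled on the PMU the whole time (time_enabled != time_running),
 * the count is scaled up accordingly.
 */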
static u64 mmap_read_self(void *addr)
{
        struct perf_event_mmap_page *pc = addr;
        u32 seq, idx, time_mult = 0, time_shift = 0;
        u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

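        /* the kernel bumps ->lock around its updates to this page */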
        do {
                seq = pc->lock;
                barrier();

                enabled = pc->time_enabled;
                running = pc->time_running;

                if (enabled != running) {
                        cyc = rdtsc();
                        time_mult = pc->time_mult;
                        time_shift = pc->time_shift;
                        time_offset = pc->time_offset;
                }

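                /*
                 * ->index is the hardware counter number plus one; zero
                 * means no counter is currently assigned to the event,
                 * so only the kernel-saved ->offset is usable.
                 */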
                idx = pc->index;
                count = pc->offset;
                if (idx)
                        count += rdpmc(idx - 1);

                barrier();
        } while (pc->lock != seq);

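        /*
         * The event was not live on the PMU the whole time it was enabled:
         * advance enabled/running by the time elapsed since the kernel
         * last updated the page (TSC cycles scaled via time_mult and
         * time_shift, split into quotient and remainder to avoid 64-bit
         * overflow), then extrapolate: count * enabled / running.
         */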
        if (enabled != running) {
                u64 quot, rem;

                quot = (cyc >> time_shift);
                rem = cyc & ((1 << time_shift) - 1);
                delta = time_offset + quot * time_mult +
                        ((rem * time_mult) >> time_shift);

                enabled += delta;
                if (idx)
                        running += delta;

                quot = count / running;
                rem = count % running;
                count = quot * enabled + (rem * enabled) / running;
        }

        return count;
}

/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
{
        exit(-1);
}

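/*
 * Open a self-monitoring instructions counter, mmap its control page
 * and, over six iterations doing 10x more work each time, check that
 * the deltas read via mmap_read_self() add up to something non-zero.
 */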
static int __test__rdpmc(void)
{
        long page_size = sysconf(_SC_PAGE_SIZE);
        volatile int tmp = 0;
        u64 i, loops = 1000;
        int n;
        int fd;
        void *addr;
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_INSTRUCTIONS,
                .exclude_kernel = 1,
        };
        u64 delta_sum = 0;
        struct sigaction sa;

        sigfillset(&sa.sa_mask);
        sa.sa_sigaction = segfault_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);

        fprintf(stderr, "\n\n");

        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                die("Error: sys_perf_event_open() syscall returned "
                    "with %d (%s)\n", fd, strerror(errno));
        }

        addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                die("Error: mmap() syscall returned "
                    "with (%s)\n", strerror(errno));
        }

        for (n = 0; n < 6; n++) {
                u64 stamp, now, delta;

                stamp = mmap_read_self(addr);

                for (i = 0; i < loops; i++)
                        tmp++;

                now = mmap_read_self(addr);
                loops *= 10;

                delta = now - stamp;
                fprintf(stderr, "%14d: %14Lu\n", n, (unsigned long long)delta);

                delta_sum += delta;
        }

        munmap(addr, page_size);
        close(fd);

        fprintf(stderr, "   ");

        if (!delta_sum)
                return -1;

        return 0;
}

static int test__rdpmc(void)
{
        int status = 0;
        int wret = 0;
        int ret;
        int pid;

        pid = fork();
        if (pid < 0)
                return -1;

        if (!pid) {
                ret = __test__rdpmc();

                exit(ret);
        }

        wret = waitpid(pid, &status, 0);
        if (wret < 0 || status)
                return -1;

        return 0;
}

#endif

static struct test {
        const char *desc;
        int (*func)(void);
} tests[] = {
        {
                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,
        },
        {
                .desc = "detect open syscall event",
                .func = test__open_syscall_event,
        },
        {
                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,
        },
        {
                .desc = "read samples using the mmap interface",
                .func = test__basic_mmap,
        },
        {
                .desc = "parse events tests",
                .func = test__parse_events,
        },
#if defined(__x86_64__) || defined(__i386__)
        {
                .desc = "x86 rdpmc test",
                .func = test__rdpmc,
        },
#endif
        {
                .desc = "Validate PERF_RECORD_* events & perf_sample fields",
                .func = test__PERF_RECORD,
        },
        {
                .func = NULL,
        },
};

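/*
 * A test matches when its 1-based number or a substring of its
 * description is given on the command line; no arguments means run
 * every test.
 */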
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
        int i;

        if (argc == 0)
                return true;

        for (i = 0; i < argc; ++i) {
                char *end;
                long nr = strtoul(argv[i], &end, 10);

                if (*end == '\0') {
                        if (nr == curr + 1)
                                return true;
                        continue;
                }

                if (strstr(tests[curr].desc, argv[i]))
                        return true;
        }

        return false;
}

static int __cmd_test(int argc, const char *argv[])
{
        int i = 0;

        while (tests[i].func) {
                int curr = i++, err;

                if (!perf_test__matches(curr, argc, argv))
                        continue;

                pr_info("%2d: %s:", i, tests[curr].desc);
                pr_debug("\n--- start ---\n");
                err = tests[curr].func();
                pr_debug("---- end ----\n%s:", tests[curr].desc);
                pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
        }

        return 0;
}

static int perf_test__list(int argc, const char **argv)
{
        int i = 0;

        while (tests[i].func) {
                int curr = i++;

                if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
                        continue;

                pr_info("%2d: %s\n", i, tests[curr].desc);
        }

        return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
        const char * const test_usage[] = {
        "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
        NULL,
        };
        const struct option test_options[] = {
        OPT_INTEGER('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_END()
        };

        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc >= 1 && !strcmp(argv[0], "list"))
                return perf_test__list(argc, argv);

        symbol_conf.priv_size = sizeof(int);
        symbol_conf.sort_by_name = true;
        symbol_conf.try_vmlinux_path = true;

        if (symbol__init() < 0)
                return -1;

        return __cmd_test(argc, argv);
}