Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Feb 2013 01:49:41 +0000 (17:49 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Feb 2013 01:49:41 +0000 (17:49 -0800)
Pull perf changes from Ingo Molnar:
 "There are lots of improvements, the biggest changes are:

  Main kernel side changes:

   - Improve uprobes performance by adding 'pre-filtering' support, by
     Oleg Nesterov.

   - Make some POWER7 events available in sysfs, equivalent to what was
     done on x86, from Sukadev Bhattiprolu.

   - tracing updates by Steve Rostedt - mostly misc fixes and smaller
     improvements.

   - Use perf/event tracing to report PCI Express advanced errors, by
     Tony Luck.

   - Enable northbridge performance counters on AMD family 15h, by Jacob
     Shin.

   - This tracing commit:

        tracing: Remove the extra 4 bytes of padding in events

     changes the ABI.  All involved parties (PowerTop in particular)
     seem to agree that it's safe to do now with the introduction of
     libtraceevent, but the devil is in the details ...

  Main tooling side changes:

   - Add 'event group view', from Namhyung Kim:

     To use it, 'perf record' should group events when recording.  And
     then perf report parses the saved group relation from file header
     and prints them together if --group option is provided.  You can
     use the 'perf evlist' command to see event group information:

        $ perf record -e '{ref-cycles,cycles}' noploop 1
        [ perf record: Woken up 2 times to write data ]
        [ perf record: Captured and wrote 0.385 MB perf.data (~16807 samples) ]

        $ perf evlist --group
        {ref-cycles,cycles}

     With this example, default perf report will show you each event
     separately.

     You can use --group option to enable event group view:

        $ perf report --group
        ...
        # group: {ref-cycles,cycles}
        # ========
        # Samples: 7K of event 'anon group { ref-cycles, cycles }'
        # Event count (approx.): 6876107743
        #
        #         Overhead  Command      Shared Object                      Symbol
        # ................  .......  .................  ..........................
            99.84%  99.76%  noploop  noploop            [.] main
             0.07%   0.00%  noploop  ld-2.15.so         [.] strcmp
             0.03%   0.00%  noploop  [kernel.kallsyms]  [k] timerqueue_del
             0.03%   0.03%  noploop  [kernel.kallsyms]  [k] sched_clock_cpu
             0.02%   0.00%  noploop  [kernel.kallsyms]  [k] account_user_time
             0.01%   0.00%  noploop  [kernel.kallsyms]  [k] __alloc_pages_nodemask
             0.00%   0.00%  noploop  [kernel.kallsyms]  [k] native_write_msr_safe
             0.00%   0.11%  noploop  [kernel.kallsyms]  [k] _raw_spin_lock
             0.00%   0.06%  noploop  [kernel.kallsyms]  [k] find_get_page
             0.00%   0.02%  noploop  [kernel.kallsyms]  [k] rcu_check_callbacks
             0.00%   0.02%  noploop  [kernel.kallsyms]  [k] __current_kernel_time

     As you can see the Overhead column now contains both of ref-cycles
     and cycles and header line shows group information also - 'anon
     group { ref-cycles, cycles }'.  The output is sorted by period of
     group leader first.

   - Initial GTK+ annotate browser, from Namhyung Kim.

   - Add option for runtime switching perf data file in perf report,
     just press 's' and a menu with the valid files found in the current
     directory will be presented, from Feng Tang.

   - Add support to display whole group data for raw columns, from Jiri
     Olsa.

   - Add per processor socket count aggregation in perf stat, from
     Stephane Eranian.

   - Add interval printing in 'perf stat', from Stephane Eranian.

   - 'perf test' improvements

   - Add support for wildcards in tracepoint system name, from Jiri
     Olsa.

   - Add anonymous huge page recognition, from Joshua Zhu.

   - perf build-id cache now can show DSOs present in a perf.data file
     that are not in the cache, to integrate with build-id servers being
     put in place by organizations such as Fedora.

   - perf top now shares more of the evsel config/creation routines with
     'record', paving the way for further integration like 'top'
     snapshots, etc.

   - perf top now supports DWARF callchains.

   - Fix mmap limitations on 32-bit, fix from David Miller.

   - 'perf bench numa mem' NUMA performance measurement suite

   - ... and lots of fixes, performance improvements, cleanups and other
     improvements I failed to list - see the shortlog and git log for
     details."

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (270 commits)
  perf/x86/amd: Enable northbridge performance counters on AMD family 15h
  perf/hwbp: Fix cleanup in case of kzalloc failure
  perf tools: Fix build with bison 2.3 and older.
  perf tools: Limit unwind support to x86 archs
  perf annotate: Make it to be able to skip unannotatable symbols
  perf gtk/annotate: Fail early if it can't annotate
  perf gtk/annotate: Show source lines with gray color
  perf gtk/annotate: Support multiple event annotation
  perf ui/gtk: Implement basic GTK2 annotation browser
  perf annotate: Fix warning message on a missing vmlinux
  perf buildid-cache: Add --update option
  uprobes/perf: Avoid uprobe_apply() whenever possible
  uprobes/perf: Teach trace_uprobe/perf code to use UPROBE_HANDLER_REMOVE
  uprobes/perf: Teach trace_uprobe/perf code to pre-filter
  uprobes/perf: Teach trace_uprobe/perf code to track the active perf_event's
  uprobes: Introduce uprobe_apply()
  perf: Introduce hw_perf_event->tp_target and ->tp_list
  uprobes/perf: Always increment trace_uprobe->nhit
  uprobes/tracing: Kill uprobe_trace_consumer, embed uprobe_consumer into trace_uprobe
  uprobes/tracing: Introduce is_trace_uprobe_enabled()
  ...

198 files changed:
Documentation/ABI/testing/sysfs-bus-event_source-devices-events [new file with mode: 0644]
Documentation/trace/ftrace.txt
arch/Kconfig
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power7-pmu.c
arch/x86/Kconfig
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/perf_event.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/Makefile
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/kprobes-common.h [deleted file]
arch/x86/kernel/kprobes-opt.c [deleted file]
arch/x86/kernel/kprobes.c [deleted file]
arch/x86/kernel/kprobes/Makefile [new file with mode: 0644]
arch/x86/kernel/kprobes/common.h [new file with mode: 0644]
arch/x86/kernel/kprobes/core.c [new file with mode: 0644]
arch/x86/kernel/kprobes/ftrace.c [new file with mode: 0644]
arch/x86/kernel/kprobes/opt.c [new file with mode: 0644]
arch/x86/kernel/uprobes.c
drivers/acpi/apei/cper.c
drivers/pci/pcie/aer/aerdrv_errprint.c
include/linux/aer.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hardirq.h
include/linux/kprobes.h
include/linux/perf_event.h
include/linux/profile.h
include/linux/ring_buffer.h
include/linux/uprobes.h
include/trace/events/ras.h [new file with mode: 0644]
include/uapi/linux/perf_event.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/uprobes.c
kernel/kprobes.c
kernel/profile.c
kernel/ptrace.c
kernel/trace/Kconfig
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_clock.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_probe.h
kernel/trace/trace_selftest.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
samples/Kconfig
samples/Makefile
samples/tracepoints/Makefile [deleted file]
samples/tracepoints/tp-samples-trace.h [deleted file]
samples/tracepoints/tracepoint-probe-sample.c [deleted file]
samples/tracepoints/tracepoint-probe-sample2.c [deleted file]
samples/tracepoints/tracepoint-sample.c [deleted file]
tools/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-utils.h
tools/lib/traceevent/parse-filter.c
tools/lib/traceevent/parse-utils.c
tools/lib/traceevent/trace-seq.c
tools/perf/Documentation/Makefile
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-buildid-cache.txt
tools/perf/Documentation/perf-diff.txt
tools/perf/Documentation/perf-evlist.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-test.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Makefile
tools/perf/arch/common.c
tools/perf/bench/bench.h
tools/perf/bench/numa.c [new file with mode: 0644]
tools/perf/builtin-annotate.c
tools/perf/builtin-bench.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-buildid-list.c
tools/perf/builtin-diff.c
tools/perf/builtin-evlist.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/feature-tests.mak
tools/perf/config/utilities.mak
tools/perf/perf.c
tools/perf/perf.h
tools/perf/scripts/perl/bin/workqueue-stats-record [deleted file]
tools/perf/scripts/perl/bin/workqueue-stats-report [deleted file]
tools/perf/scripts/perl/rwtop.pl
tools/perf/scripts/perl/workqueue-stats.pl [deleted file]
tools/perf/tests/attr.c
tools/perf/tests/attr.py
tools/perf/tests/attr/base-record
tools/perf/tests/attr/test-record-group
tools/perf/tests/attr/test-record-group1
tools/perf/tests/builtin-test.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/hists_link.c [new file with mode: 0644]
tools/perf/tests/mmap-basic.c
tools/perf/tests/open-syscall-all-cpus.c
tools/perf/tests/open-syscall.c
tools/perf/tests/parse-events.c
tools/perf/tests/perf-record.c
tools/perf/tests/pmu.c
tools/perf/tests/python-use.c [new file with mode: 0644]
tools/perf/tests/tests.h
tools/perf/tests/util.c [deleted file]
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browser.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/annotate.c [new file with mode: 0644]
tools/perf/ui/gtk/browser.c
tools/perf/ui/gtk/gtk.h
tools/perf/ui/gtk/helpline.c
tools/perf/ui/gtk/hists.c [new file with mode: 0644]
tools/perf/ui/helpline.c
tools/perf/ui/helpline.h
tools/perf/ui/hist.c
tools/perf/ui/keysyms.h
tools/perf/ui/setup.c
tools/perf/ui/stdio/hist.c
tools/perf/ui/tui/helpline.c
tools/perf/ui/util.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/bitops.h
tools/perf/util/intlist.c
tools/perf/util/intlist.h
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/pmu.y
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/string.c
tools/perf/util/strlist.c
tools/perf/util/strlist.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/sysfs.c
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/top.c
tools/perf/util/top.h
tools/perf/util/util.c
tools/perf/util/util.h

diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
new file mode 100644 (file)
index 0000000..0adeb52
--- /dev/null
@@ -0,0 +1,62 @@
+What:          /sys/devices/cpu/events/
+               /sys/devices/cpu/events/branch-misses
+               /sys/devices/cpu/events/cache-references
+               /sys/devices/cpu/events/cache-misses
+               /sys/devices/cpu/events/stalled-cycles-frontend
+               /sys/devices/cpu/events/branch-instructions
+               /sys/devices/cpu/events/stalled-cycles-backend
+               /sys/devices/cpu/events/instructions
+               /sys/devices/cpu/events/cpu-cycles
+
+Date:          2013/01/08
+
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+
+Description:   Generic performance monitoring events
+
+               A collection of performance monitoring events that may be
+               supported by many/most CPUs. These events can be monitored
+               using the 'perf(1)' tool.
+
+               The contents of each file would look like:
+
+                       event=0xNNNN
+
+               where 'N' is a hex digit and the number '0xNNNN' shows the
+               "raw code" for the perf event identified by the file's
+               "basename".
+
+
+What:          /sys/devices/cpu/events/PM_LD_MISS_L1
+               /sys/devices/cpu/events/PM_LD_REF_L1
+               /sys/devices/cpu/events/PM_CYC
+               /sys/devices/cpu/events/PM_BRU_FIN
+               /sys/devices/cpu/events/PM_GCT_NOSLOT_CYC
+               /sys/devices/cpu/events/PM_BRU_MPRED
+               /sys/devices/cpu/events/PM_INST_CMPL
+               /sys/devices/cpu/events/PM_CMPLU_STALL
+
+Date:          2013/01/08
+
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+               Linux Powerpc mailing list <linuxppc-dev@ozlabs.org>
+
+Description:   POWER-systems specific performance monitoring events
+
+               A collection of performance monitoring events that may be
+               supported by the POWER CPU. These events can be monitored
+               using the 'perf(1)' tool.
+
+               These events may not be supported by other CPUs.
+
+               The contents of each file would look like:
+
+                       event=0xNNNN
+
+               where 'N' is a hex digit and the number '0xNNNN' shows the
+               "raw code" for the perf event identified by the file's
+               "basename".
+
+               Further, multiple terms like 'event=0xNNNN' can be specified
+               and separated with comma. All available terms are defined in
+               the /sys/bus/event_source/devices/<dev>/format file.
index 6f51fed45f2d2f83ca6fcaaeb83dc1e5ee64fb6c..53d6a3c51d875771fc2966c9917aec13b17f53c6 100644 (file)
@@ -1842,6 +1842,89 @@ an error.
  # cat buffer_size_kb
 85
 
+Snapshot
+--------
+CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
+available to all non latency tracers. (Latency tracers which
+record max latency, such as "irqsoff" or "wakeup", can't use
+this feature, since those are already using the snapshot
+mechanism internally.)
+
+Snapshot preserves a current trace buffer at a particular point
+in time without stopping tracing. Ftrace swaps the current
+buffer with a spare buffer, and tracing continues in the new
+current (=previous spare) buffer.
+
+The following debugfs files in "tracing" are related to this
+feature:
+
+  snapshot:
+
+       This is used to take a snapshot and to read the output
+       of the snapshot. Echo 1 into this file to allocate a
+       spare buffer and to take a snapshot (swap), then read
+       the snapshot from this file in the same format as
+       "trace" (described above in the section "The File
+       System"). Both reads snapshot and tracing are executable
+       in parallel. When the spare buffer is allocated, echoing
+       0 frees it, and echoing else (positive) values clear the
+       snapshot contents.
+       More details are shown in the table below.
+
+       status\input  |     0      |     1      |    else    |
+       --------------+------------+------------+------------+
+       not allocated |(do nothing)| alloc+swap |   EINVAL   |
+       --------------+------------+------------+------------+
+       allocated     |    free    |    swap    |   clear    |
+       --------------+------------+------------+------------+
+
+Here is an example of using the snapshot feature.
+
+ # echo 1 > events/sched/enable
+ # echo 1 > snapshot
+ # cat snapshot
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 71/71   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [005] d...  2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120
+           sleep-2242  [005] d...  2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120
+[...]
+          <idle>-0     [002] d...  2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120
+
+ # cat trace
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 77/77   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [007] d...  2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120
+ snapshot-test-2-2229  [002] d...  2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120
+[...]
+
+
+If you try to use this snapshot feature when current tracer is
+one of the latency tracers, you will get the following results.
+
+ # echo wakeup > current_tracer
+ # echo 1 > snapshot
+bash: echo: write error: Device or resource busy
+ # cat snapshot
+cat: snapshot: Device or resource busy
+
 -----------
 
 More details can be found in the source code, in the
index 7f8f281f2585e25ee74853bfb88183514ce3d5ca..97fb7d0365d15505d500a126588f889a7647fcd8 100644 (file)
@@ -76,6 +76,15 @@ config OPTPROBES
        depends on KPROBES && HAVE_OPTPROBES
        depends on !PREEMPT
 
+config KPROBES_ON_FTRACE
+       def_bool y
+       depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+       depends on DYNAMIC_FTRACE_WITH_REGS
+       help
+        If function tracer is enabled and the arch supports full
+        passing of pt_regs to function tracing, then kprobes can
+        optimize on top of function tracing.
+
 config UPROBES
        bool "Transparent user-space probes (EXPERIMENTAL)"
        depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
 config HAVE_OPTPROBES
        bool
 
+config HAVE_KPROBES_ON_FTRACE
+       bool
+
 config HAVE_NMI_WATCHDOG
        bool
 #
index 9710be3a2d1753e7fef95287d182811e5ce5a0ba..136bba62efa48775bd8b4738f675e8796b0ee0b2 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <asm/hw_irq.h>
+#include <linux/device.h>
 
 #define MAX_HWEVENTS           8
 #define MAX_EVENT_ALTERNATIVES 8
@@ -35,6 +36,7 @@ struct power_pmu {
        void            (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
        int             (*limited_pmc_event)(u64 event_id);
        u32             flags;
+       const struct attribute_group    **attr_groups;
        int             n_generic;
        int             *generic_events;
        int             (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  * If an event_id is not subject to the constraint expressed by a particular
  * field, then it will have 0 in both the mask and value for that field.
  */
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+                               struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC' where the latter is the name by which the event is known in
+ * POWER CPU specification.
+ */
+#define        EVENT_VAR(_id, _suffix)         event_attr_##_id##_suffix
+#define        EVENT_PTR(_id, _suffix)         &EVENT_VAR(_id, _suffix).attr.attr
+
+#define        EVENT_ATTR(_name, _id, _suffix)                                 \
+       PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id,    \
+                       power_events_sysfs_show)
+
+#define        GENERIC_EVENT_ATTR(_name, _id)  EVENT_ATTR(_name, _id, _g)
+#define        GENERIC_EVENT_PTR(_id)          EVENT_PTR(_id, _g)
+
+#define        POWER_EVENT_ATTR(_name, _id)    EVENT_ATTR(PM_##_name, _id, _p)
+#define        POWER_EVENT_PTR(_id)            EVENT_PTR(_id, _p)
index aa2465e21f1a87b989e49eae2027fee72dc4948f..fa476d50791f28f690d77a236defafbe17896526 100644 (file)
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event)
        return event->hw.idx;
 }
 
+ssize_t power_events_sysfs_show(struct device *dev,
+                               struct device_attribute *attr, char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr;
+
+       pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
 struct pmu power_pmu = {
        .pmu_enable     = power_pmu_enable,
        .pmu_disable    = power_pmu_disable,
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);
 
+       power_pmu.attr_groups = ppmu->attr_groups;
+
 #ifdef MSR_HV
        /*
         * Use FCHV to ignore kernel events if MSR.HV is set.
index 2ee01e38d5e256b9a92f0ee678e08a19e768f572..b554879bd31e4c48cc6769092695c7f32a899af4 100644 (file)
 #define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
 #define MMCR1_PMCSEL_MSK       0xff
 
+/*
+ * Power7 event codes.
+ */
+#define        PME_PM_CYC                      0x1e
+#define        PME_PM_GCT_NOSLOT_CYC           0x100f8
+#define        PME_PM_CMPLU_STALL              0x4000a
+#define        PME_PM_INST_CMPL                0x2
+#define        PME_PM_LD_REF_L1                0xc880
+#define        PME_PM_LD_MISS_L1               0x400f0
+#define        PME_PM_BRU_FIN                  0x10068
+#define        PME_PM_BRU_MPRED                0x400f6
+
 /*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
 }
 
 static int power7_generic_events[] = {
-       [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
-       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
-       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a,  /* CMPLU_STALL */
-       [PERF_COUNT_HW_INSTRUCTIONS] = 2,
-       [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880,      /* LD_REF_L1_LSU*/
-       [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0,         /* LD_MISS_L1   */
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068,  /* BRU_FIN      */
-       [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6,        /* BR_MPRED     */
+       [PERF_COUNT_HW_CPU_CYCLES] =                    PME_PM_CYC,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =       PME_PM_GCT_NOSLOT_CYC,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =        PME_PM_CMPLU_STALL,
+       [PERF_COUNT_HW_INSTRUCTIONS] =                  PME_PM_INST_CMPL,
+       [PERF_COUNT_HW_CACHE_REFERENCES] =              PME_PM_LD_REF_L1,
+       [PERF_COUNT_HW_CACHE_MISSES] =                  PME_PM_LD_MISS_L1,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =           PME_PM_BRU_FIN,
+       [PERF_COUNT_HW_BRANCH_MISSES] =                 PME_PM_BRU_MPRED,
 };
 
 #define C(x)   PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        },
 };
 
+
+GENERIC_EVENT_ATTR(cpu-cycles,                 CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend,    GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend,     CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions,               INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references,           LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses,               LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions,                BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses,              BRU_MPRED);
+
+POWER_EVENT_ATTR(CYC,                          CYC);
+POWER_EVENT_ATTR(GCT_NOSLOT_CYC,               GCT_NOSLOT_CYC);
+POWER_EVENT_ATTR(CMPLU_STALL,                  CMPLU_STALL);
+POWER_EVENT_ATTR(INST_CMPL,                    INST_CMPL);
+POWER_EVENT_ATTR(LD_REF_L1,                    LD_REF_L1);
+POWER_EVENT_ATTR(LD_MISS_L1,                   LD_MISS_L1);
+POWER_EVENT_ATTR(BRU_FIN,                      BRU_FIN)
+POWER_EVENT_ATTR(BRU_MPRED,                    BRU_MPRED);
+
+static struct attribute *power7_events_attr[] = {
+       GENERIC_EVENT_PTR(CYC),
+       GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
+       GENERIC_EVENT_PTR(CMPLU_STALL),
+       GENERIC_EVENT_PTR(INST_CMPL),
+       GENERIC_EVENT_PTR(LD_REF_L1),
+       GENERIC_EVENT_PTR(LD_MISS_L1),
+       GENERIC_EVENT_PTR(BRU_FIN),
+       GENERIC_EVENT_PTR(BRU_MPRED),
+
+       POWER_EVENT_PTR(CYC),
+       POWER_EVENT_PTR(GCT_NOSLOT_CYC),
+       POWER_EVENT_PTR(CMPLU_STALL),
+       POWER_EVENT_PTR(INST_CMPL),
+       POWER_EVENT_PTR(LD_REF_L1),
+       POWER_EVENT_PTR(LD_MISS_L1),
+       POWER_EVENT_PTR(BRU_FIN),
+       POWER_EVENT_PTR(BRU_MPRED),
+       NULL
+};
+
+
+static struct attribute_group power7_pmu_events_group = {
+       .name = "events",
+       .attrs = power7_events_attr,
+};
+
+static const struct attribute_group *power7_pmu_attr_groups[] = {
+       &power7_pmu_events_group,
+       NULL,
+};
+
 static struct power_pmu power7_pmu = {
        .name                   = "POWER7",
        .n_counter              = 6,
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = {
        .get_alternatives       = power7_get_alternatives,
        .disable_pmc            = power7_disable_pmc,
        .flags                  = PPMU_ALT_SIPR,
+       .attr_groups            = power7_pmu_attr_groups,
        .n_generic              = ARRAY_SIZE(power7_generic_events),
        .generic_events         = power7_generic_events,
        .cache_events           = &power7_cache_events,
index 36b05ed0bb3cd3b53857e62f74e00db1a6a068ac..a2570490eab6114b400324e858b2b676b7ac3708 100644 (file)
@@ -39,10 +39,12 @@ config X86
        select HAVE_DMA_CONTIGUOUS if !SWIOTLB
        select HAVE_KRETPROBES
        select HAVE_OPTPROBES
+       select HAVE_KPROBES_ON_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FENTRY if X86_64
        select HAVE_C_RECORDMCOUNT
        select HAVE_DYNAMIC_FTRACE
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
index 2d9075e863a0cfc25e43b7d40e706e8016586859..93fe929d1cee20ec1dc183d5c390e5d9325b26ce 100644 (file)
 #define X86_FEATURE_TBM                (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT    (6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB  (6*32+24) /* NB performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq      boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core   boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb     boot_cpu_has(X86_FEATURE_PERFCTR_NB)
 #define cpu_has_cx8            boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
index 9a25b522d37799adf0b7a00d898ba30cb70b08b7..86cb51e1ca96abc11dda6317293a775aebbdf51e 100644 (file)
@@ -44,7 +44,6 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
 #endif
 
 #ifndef __ASSEMBLY__
index 4fabcdf1cfa74b6f6c7c04327e43fb5c7c1466ac..57cb634022136f39f3c9b4eebe9390b917717d89 100644 (file)
 #define ARCH_PERFMON_EVENTSEL_INV                      (1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK                    0xFF000000ULL
 
-#define AMD_PERFMON_EVENTSEL_GUESTONLY                 (1ULL << 40)
-#define AMD_PERFMON_EVENTSEL_HOSTONLY                  (1ULL << 41)
+#define AMD64_EVENTSEL_INT_CORE_ENABLE                 (1ULL << 36)
+#define AMD64_EVENTSEL_GUESTONLY                       (1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY                                (1ULL << 41)
+
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT              37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK               \
+       (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
 
 #define AMD64_EVENTSEL_EVENT   \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define AMD64_RAW_EVENT_MASK           \
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB                \
+       (AMD64_EVENTSEL_EVENT        |  \
+        ARCH_PERFMON_EVENTSEL_UMASK)
 #define AMD64_NUM_COUNTERS                             4
 #define AMD64_NUM_COUNTERS_CORE                                6
+#define AMD64_NUM_COUNTERS_NB                          4
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL          0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
index 433a59fb1a7411b97cd0db54d30a3bc727a4aeea..075a402555912541fe0b502c6c6b877812a5b089 100644 (file)
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL              0xc0010200
 #define MSR_F15H_PERF_CTR              0xc0010201
+#define MSR_F15H_NB_PERF_CTL           0xc0010240
+#define MSR_F15H_NB_PERF_CTR           0xc0010241
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 34e923a537628777aa4e5ac90741bef6ef575633..ac3b3d002833024a6641ffbbc5d7ead7a99ca23b 100644 (file)
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC)         += trace_clock.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
-obj-$(CONFIG_KPROBES)          += kprobes.o
-obj-$(CONFIG_OPTPROBES)                += kprobes-opt.o
+obj-y                          += kprobes/
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_DOUBLEFAULT)      += doublefault_32.o
 obj-$(CONFIG_KGDB)             += kgdb.o
index 6774c17a55766dec97219ddf2798ac4c740a2963..bf0f01aea99449e53a87f092a84b3263263da911 100644 (file)
@@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
-               hwc->event_base_rdpmc = hwc->idx;
+               hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
        }
 }
 
@@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
        .attrs = NULL,
 };
 
-struct perf_pmu_events_attr {
-       struct device_attribute attr;
-       u64 id;
-};
-
 /*
  * Remove all undefined events (x86_pmu.event_map(id) == 0)
  * out of events_attr attributes.
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
 
-#define EVENT_ATTR(_name, _id)                                 \
-static struct perf_pmu_events_attr EVENT_VAR(_id) = {          \
-       .attr = __ATTR(_name, 0444, events_sysfs_show, NULL),   \
-       .id   =  PERF_COUNT_HW_##_id,                           \
-};
+#define EVENT_ATTR(_name, _id)                                         \
+       PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id,      \
+                       events_sysfs_show)
 
 EVENT_ATTR(cpu-cycles,                 CPU_CYCLES              );
 EVENT_ATTR(instructions,               INSTRUCTIONS            );
index 115c1ea977469b0fda5b43f76e8595575effb24b..7f5c75c2afdd9552653ffd7c9babb8c871669b05 100644 (file)
@@ -325,6 +325,8 @@ struct x86_pmu {
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
+       int             (*addr_offset)(int index, bool eventsel);
+       int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs
 
 u64 x86_perf_event_update(struct perf_event *event);
 
-static inline int x86_pmu_addr_offset(int index)
+static inline unsigned int x86_pmu_config_addr(int index)
 {
-       int offset;
-
-       /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
-       alternative_io(ASM_NOP2,
-                      "shll $1, %%eax",
-                      X86_FEATURE_PERFCTR_CORE,
-                      "=a" (offset),
-                      "a"  (index));
-
-       return offset;
+       return x86_pmu.eventsel + (x86_pmu.addr_offset ?
+                                  x86_pmu.addr_offset(index, true) : index);
 }
 
-static inline unsigned int x86_pmu_config_addr(int index)
+static inline unsigned int x86_pmu_event_addr(int index)
 {
-       return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+       return x86_pmu.perfctr + (x86_pmu.addr_offset ?
+                                 x86_pmu.addr_offset(index, false) : index);
 }
 
-static inline unsigned int x86_pmu_event_addr(int index)
+static inline int x86_pmu_rdpmc_index(int index)
 {
-       return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+       return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
 int x86_setup_perfctr(struct perf_event *event);
index c93bc4e813a0c10303984db79cafc1d6ee5a6caf..dfdab42aed27372503d5fe33cd29a4ef97ae580b 100644 (file)
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
        return amd_perfmon_event_map[hw_event];
 }
 
-static int amd_pmu_hw_config(struct perf_event *event)
+static struct event_constraint *amd_nb_event_constraint;
+
+/*
+ * Previously calculated offsets
+ */
+static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
+
+/*
+ * Legacy CPUs:
+ *   4 counters starting at 0xc0010000 each offset by 1
+ *
+ * CPUs with core performance counter extensions:
+ *   6 counters starting at 0xc0010200 each offset by 2
+ *
+ * CPUs with north bridge performance counter extensions:
+ *   4 additional counters starting at 0xc0010240 each offset by 2
+ *   (indexed right above either one of the above core counters)
+ */
+static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-       int ret;
+       int offset, first, base;
 
-       /* pass precise event sampling to ibs: */
-       if (event->attr.precise_ip && get_ibs_caps())
-               return -ENOENT;
+       if (!index)
+               return index;
+
+       if (eventsel)
+               offset = event_offsets[index];
+       else
+               offset = count_offsets[index];
+
+       if (offset)
+               return offset;
+
+       if (amd_nb_event_constraint &&
+           test_bit(index, amd_nb_event_constraint->idxmsk)) {
+               /*
+                * calculate the offset of NB counters with respect to
+                * base eventsel or perfctr
+                */
+
+               first = find_first_bit(amd_nb_event_constraint->idxmsk,
+                                      X86_PMC_IDX_MAX);
+
+               if (eventsel)
+                       base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
+               else
+                       base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
+
+               offset = base + ((index - first) << 1);
+       } else if (!cpu_has_perfctr_core)
+               offset = index;
+       else
+               offset = index << 1;
+
+       if (eventsel)
+               event_offsets[index] = offset;
+       else
+               count_offsets[index] = offset;
+
+       return offset;
+}
+
+static inline int amd_pmu_rdpmc_index(int index)
+{
+       int ret, first;
+
+       if (!index)
+               return index;
+
+       ret = rdpmc_indexes[index];
 
-       ret = x86_pmu_hw_config(event);
        if (ret)
                return ret;
 
-       if (has_branch_stack(event))
-               return -EOPNOTSUPP;
+       if (amd_nb_event_constraint &&
+           test_bit(index, amd_nb_event_constraint->idxmsk)) {
+               /*
+                * according to the manual, ECX value of the NB counters is
+                * the index of the NB counter (0, 1, 2 or 3) plus 6
+                */
+
+               first = find_first_bit(amd_nb_event_constraint->idxmsk,
+                                      X86_PMC_IDX_MAX);
+               ret = index - first + 6;
+       } else
+               ret = index;
+
+       rdpmc_indexes[index] = ret;
+
+       return ret;
+}
 
+static int amd_core_hw_config(struct perf_event *event)
+{
        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
-               event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+               event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
-               event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+               event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
+
+       return 0;
+}
+
+/*
+ * NB counters do not support the following event select bits:
+ *   Host/Guest only
+ *   Counter mask
+ *   Invert counter mask
+ *   Edge detect
+ *   OS/User mode
+ */
+static int amd_nb_hw_config(struct perf_event *event)
+{
+       /* for NB, we only allow system wide counting mode */
+       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+               return -EINVAL;
+
+       if (event->attr.exclude_user || event->attr.exclude_kernel ||
+           event->attr.exclude_host || event->attr.exclude_guest)
+               return -EINVAL;
 
-       if (event->attr.type != PERF_TYPE_RAW)
-               return 0;
+       event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+                             ARCH_PERFMON_EVENTSEL_OS);
 
-       event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+       if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
+                                ARCH_PERFMON_EVENTSEL_INT))
+               return -EINVAL;
 
        return 0;
 }
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
        return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
+{
+       return amd_nb_event_constraint && amd_is_nb_event(hwc);
+}
+
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
        struct amd_nb *nb = cpuc->amd_nb;
@@ -188,19 +297,36 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
        return nb && nb->nb_id != -1;
 }
 
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
-                                     struct perf_event *event)
+static int amd_pmu_hw_config(struct perf_event *event)
+{
+       int ret;
+
+       /* pass precise event sampling to ibs: */
+       if (event->attr.precise_ip && get_ibs_caps())
+               return -ENOENT;
+
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       ret = x86_pmu_hw_config(event);
+       if (ret)
+               return ret;
+
+       if (event->attr.type == PERF_TYPE_RAW)
+               event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+       if (amd_is_perfctr_nb_event(&event->hw))
+               return amd_nb_hw_config(event);
+
+       return amd_core_hw_config(event);
+}
+
+static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
+                                          struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;
 
-       /*
-        * only care about NB events
-        */
-       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-               return;
-
        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
        }
 }
 
+static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
+{
+       int core_id = cpu_data(smp_processor_id()).cpu_core_id;
+
+       /* deliver interrupts only to this core */
+       if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
+               hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
+               hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
+               hwc->config |= (u64)(core_id) <<
+                       AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
+       }
+}
+
  /*
   * AMD64 NorthBridge events need special treatment because
   * counter access needs to be synchronized across all cores
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
   *
   * Given that resources are allocated (cmpxchg), they must be
   * eventually freed for others to use. This is accomplished by
-  * calling amd_put_event_constraints().
+  * calling __amd_put_nb_event_constraints()
   *
   * Non NB events are not impacted by this restriction.
   */
 static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+                              struct event_constraint *c)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
-       struct perf_event *old = NULL;
-       int max = x86_pmu.num_counters;
-       int i, j, k = -1;
+       struct perf_event *old;
+       int idx, new = -1;
 
-       /*
-        * if not NB event or no NB, then no constraints
-        */
-       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-               return &unconstrained;
+       if (!c)
+               c = &unconstrained;
+
+       if (cpuc->is_fake)
+               return c;
 
        /*
         * detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
-       for (i = 0; i < max; i++) {
-               /*
-                * keep track of first free slot
-                */
-               if (k == -1 && !nb->owners[i])
-                       k = i;
+       for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+               if (new == -1 || hwc->idx == idx)
+                       /* assign free slot, prefer hwc->idx */
+                       old = cmpxchg(nb->owners + idx, NULL, event);
+               else if (nb->owners[idx] == event)
+                       /* event already present */
+                       old = event;
+               else
+                       continue;
+
+               if (old && old != event)
+                       continue;
+
+               /* reassign to this slot */
+               if (new != -1)
+                       cmpxchg(nb->owners + new, event, NULL);
+               new = idx;
 
                /* already present, reuse */
-               if (nb->owners[i] == event)
-                       goto done;
-       }
-       /*
-        * not present, so grab a new slot
-        * starting either at:
-        */
-       if (hwc->idx != -1) {
-               /* previous assignment */
-               i = hwc->idx;
-       } else if (k != -1) {
-               /* start from free slot found */
-               i = k;
-       } else {
-               /*
-                * event not found, no slot found in
-                * first pass, try again from the
-                * beginning
-                */
-               i = 0;
-       }
-       j = i;
-       do {
-               old = cmpxchg(nb->owners+i, NULL, event);
-               if (!old)
+               if (old == event)
                        break;
-               if (++i == max)
-                       i = 0;
-       } while (i != j);
-done:
-       if (!old)
-               return &nb->event_constraints[i];
-
-       return &emptyconstraint;
+       }
+
+       if (new == -1)
+               return &emptyconstraint;
+
+       if (amd_is_perfctr_nb_event(hwc))
+               amd_nb_interrupt_hw_config(hwc);
+
+       return &nb->event_constraints[new];
 }
 
 static struct amd_nb *amd_alloc_nb(int cpu)
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
        struct amd_nb *nb;
        int i, nb_id;
 
-       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        if (boot_cpu_data.x86_max_cores < 2)
                return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+       /*
+        * if not NB event or no NB, then no constraints
+        */
+       if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
+               return &unconstrained;
+
+       return __amd_get_nb_event_constraints(cpuc, event,
+                                             amd_nb_event_constraint);
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+                                     struct perf_event *event)
+{
+       if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
+               __amd_put_nb_event_constraints(cpuc, event);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
+static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
+static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
+
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
-               /* not yet implemented */
-               return &emptyconstraint;
+               return __amd_get_nb_event_constraints(cpuc, event,
+                                                     amd_nb_event_constraint);
        default:
                return &emptyconstraint;
        }
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
+       .addr_offset            = amd_pmu_addr_offset,
+       .rdpmc_index            = amd_pmu_rdpmc_index,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int setup_event_constraints(void)
 {
-       if (boot_cpu_data.x86 >= 0x15)
+       if (boot_cpu_data.x86 == 0x15)
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
        return 0;
 }
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
        return 0;
 }
 
+static int setup_perfctr_nb(void)
+{
+       if (!cpu_has_perfctr_nb)
+               return -ENODEV;
+
+       x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
+
+       if (cpu_has_perfctr_core)
+               amd_nb_event_constraint = &amd_NBPMC96;
+       else
+               amd_nb_event_constraint = &amd_NBPMC74;
+
+       printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
+
+       return 0;
+}
+
 __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
 
        setup_event_constraints();
        setup_perfctr_core();
+       setup_perfctr_nb();
 
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
         * SVM is disabled the Guest-only bits still gets set and the counter
         * will not count anything.
         */
-       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        /* Reload all events */
        x86_pmu_disable_all();
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h
deleted file mode 100644 (file)
index 3230b68..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __X86_KERNEL_KPROBES_COMMON_H
-#define __X86_KERNEL_KPROBES_COMMON_H
-
-/* Kprobes and Optprobes common header */
-
-#ifdef CONFIG_X86_64
-#define SAVE_REGS_STRING                       \
-       /* Skip cs, ip, orig_ax. */             \
-       "       subq $24, %rsp\n"               \
-       "       pushq %rdi\n"                   \
-       "       pushq %rsi\n"                   \
-       "       pushq %rdx\n"                   \
-       "       pushq %rcx\n"                   \
-       "       pushq %rax\n"                   \
-       "       pushq %r8\n"                    \
-       "       pushq %r9\n"                    \
-       "       pushq %r10\n"                   \
-       "       pushq %r11\n"                   \
-       "       pushq %rbx\n"                   \
-       "       pushq %rbp\n"                   \
-       "       pushq %r12\n"                   \
-       "       pushq %r13\n"                   \
-       "       pushq %r14\n"                   \
-       "       pushq %r15\n"
-#define RESTORE_REGS_STRING                    \
-       "       popq %r15\n"                    \
-       "       popq %r14\n"                    \
-       "       popq %r13\n"                    \
-       "       popq %r12\n"                    \
-       "       popq %rbp\n"                    \
-       "       popq %rbx\n"                    \
-       "       popq %r11\n"                    \
-       "       popq %r10\n"                    \
-       "       popq %r9\n"                     \
-       "       popq %r8\n"                     \
-       "       popq %rax\n"                    \
-       "       popq %rcx\n"                    \
-       "       popq %rdx\n"                    \
-       "       popq %rsi\n"                    \
-       "       popq %rdi\n"                    \
-       /* Skip orig_ax, ip, cs */              \
-       "       addq $24, %rsp\n"
-#else
-#define SAVE_REGS_STRING                       \
-       /* Skip cs, ip, orig_ax and gs. */      \
-       "       subl $16, %esp\n"               \
-       "       pushl %fs\n"                    \
-       "       pushl %es\n"                    \
-       "       pushl %ds\n"                    \
-       "       pushl %eax\n"                   \
-       "       pushl %ebp\n"                   \
-       "       pushl %edi\n"                   \
-       "       pushl %esi\n"                   \
-       "       pushl %edx\n"                   \
-       "       pushl %ecx\n"                   \
-       "       pushl %ebx\n"
-#define RESTORE_REGS_STRING                    \
-       "       popl %ebx\n"                    \
-       "       popl %ecx\n"                    \
-       "       popl %edx\n"                    \
-       "       popl %esi\n"                    \
-       "       popl %edi\n"                    \
-       "       popl %ebp\n"                    \
-       "       popl %eax\n"                    \
-       /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
-       "       addl $24, %esp\n"
-#endif
-
-/* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
-/* Recover instruction if given address is probed */
-extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
-                                        unsigned long addr);
-/*
- * Copy an instruction and adjust the displacement if the instruction
- * uses the %rip-relative addressing mode.
- */
-extern int __copy_instruction(u8 *dest, u8 *src);
-
-/* Generate a relative-jump/call instruction */
-extern void synthesize_reljump(void *from, void *to);
-extern void synthesize_relcall(void *from, void *to);
-
-#ifdef CONFIG_OPTPROBES
-extern int arch_init_optprobes(void);
-extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
-extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
-#else  /* !CONFIG_OPTPROBES */
-static inline int arch_init_optprobes(void)
-{
-       return 0;
-}
-static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
-{
-       return 0;
-}
-static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
-{
-       return addr;
-}
-#endif
-#endif
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
deleted file mode 100644 (file)
index c5e410e..0000000
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- *  Kernel Probes Jump Optimization (Optprobes)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- * Copyright (C) Hitachi Ltd., 2012
- */
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/hardirq.h>
-#include <linux/preempt.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/kallsyms.h>
-#include <linux/ftrace.h>
-
-#include <asm/cacheflush.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/insn.h>
-#include <asm/debugreg.h>
-
-#include "kprobes-common.h"
-
-unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
-{
-       struct optimized_kprobe *op;
-       struct kprobe *kp;
-       long offs;
-       int i;
-
-       for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
-               kp = get_kprobe((void *)addr - i);
-               /* This function only handles jump-optimized kprobe */
-               if (kp && kprobe_optimized(kp)) {
-                       op = container_of(kp, struct optimized_kprobe, kp);
-                       /* If op->list is not empty, op is under optimizing */
-                       if (list_empty(&op->list))
-                               goto found;
-               }
-       }
-
-       return addr;
-found:
-       /*
-        * If the kprobe can be optimized, original bytes which can be
-        * overwritten by jump destination address. In this case, original
-        * bytes must be recovered from op->optinsn.copied_insn buffer.
-        */
-       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       if (addr == (unsigned long)kp->addr) {
-               buf[0] = kp->opcode;
-               memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-       } else {
-               offs = addr - (unsigned long)kp->addr - 1;
-               memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
-       }
-
-       return (unsigned long)buf;
-}
-
-/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
-static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
-{
-#ifdef CONFIG_X86_64
-       *addr++ = 0x48;
-       *addr++ = 0xbf;
-#else
-       *addr++ = 0xb8;
-#endif
-       *(unsigned long *)addr = val;
-}
-
-static void __used __kprobes kprobes_optinsn_template_holder(void)
-{
-       asm volatile (
-                       ".global optprobe_template_entry\n"
-                       "optprobe_template_entry:\n"
-#ifdef CONFIG_X86_64
-                       /* We don't bother saving the ss register */
-                       "       pushq %rsp\n"
-                       "       pushfq\n"
-                       SAVE_REGS_STRING
-                       "       movq %rsp, %rsi\n"
-                       ".global optprobe_template_val\n"
-                       "optprobe_template_val:\n"
-                       ASM_NOP5
-                       ASM_NOP5
-                       ".global optprobe_template_call\n"
-                       "optprobe_template_call:\n"
-                       ASM_NOP5
-                       /* Move flags to rsp */
-                       "       movq 144(%rsp), %rdx\n"
-                       "       movq %rdx, 152(%rsp)\n"
-                       RESTORE_REGS_STRING
-                       /* Skip flags entry */
-                       "       addq $8, %rsp\n"
-                       "       popfq\n"
-#else /* CONFIG_X86_32 */
-                       "       pushf\n"
-                       SAVE_REGS_STRING
-                       "       movl %esp, %edx\n"
-                       ".global optprobe_template_val\n"
-                       "optprobe_template_val:\n"
-                       ASM_NOP5
-                       ".global optprobe_template_call\n"
-                       "optprobe_template_call:\n"
-                       ASM_NOP5
-                       RESTORE_REGS_STRING
-                       "       addl $4, %esp\n"        /* skip cs */
-                       "       popf\n"
-#endif
-                       ".global optprobe_template_end\n"
-                       "optprobe_template_end:\n");
-}
-
-#define TMPL_MOVE_IDX \
-       ((long)&optprobe_template_val - (long)&optprobe_template_entry)
-#define TMPL_CALL_IDX \
-       ((long)&optprobe_template_call - (long)&optprobe_template_entry)
-#define TMPL_END_IDX \
-       ((long)&optprobe_template_end - (long)&optprobe_template_entry)
-
-#define INT3_SIZE sizeof(kprobe_opcode_t)
-
-/* Optimized kprobe call back function: called from optinsn */
-static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long flags;
-
-       /* This is possible if op is under delayed unoptimizing */
-       if (kprobe_disabled(&op->kp))
-               return;
-
-       local_irq_save(flags);
-       if (kprobe_running()) {
-               kprobes_inc_nmissed_count(&op->kp);
-       } else {
-               /* Save skipped registers */
-#ifdef CONFIG_X86_64
-               regs->cs = __KERNEL_CS;
-#else
-               regs->cs = __KERNEL_CS | get_kernel_rpl();
-               regs->gs = 0;
-#endif
-               regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
-               regs->orig_ax = ~0UL;
-
-               __this_cpu_write(current_kprobe, &op->kp);
-               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               opt_pre_handler(&op->kp, regs);
-               __this_cpu_write(current_kprobe, NULL);
-       }
-       local_irq_restore(flags);
-}
-
-static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
-{
-       int len = 0, ret;
-
-       while (len < RELATIVEJUMP_SIZE) {
-               ret = __copy_instruction(dest + len, src + len);
-               if (!ret || !can_boost(dest + len))
-                       return -EINVAL;
-               len += ret;
-       }
-       /* Check whether the address range is reserved */
-       if (ftrace_text_reserved(src, src + len - 1) ||
-           alternatives_text_reserved(src, src + len - 1) ||
-           jump_label_text_reserved(src, src + len - 1))
-               return -EBUSY;
-
-       return len;
-}
-
-/* Check whether insn is indirect jump */
-static int __kprobes insn_is_indirect_jump(struct insn *insn)
-{
-       return ((insn->opcode.bytes[0] == 0xff &&
-               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
-               insn->opcode.bytes[0] == 0xea); /* Segment based jump */
-}
-
-/* Check whether insn jumps into specified address range */
-static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
-{
-       unsigned long target = 0;
-
-       switch (insn->opcode.bytes[0]) {
-       case 0xe0:      /* loopne */
-       case 0xe1:      /* loope */
-       case 0xe2:      /* loop */
-       case 0xe3:      /* jcxz */
-       case 0xe9:      /* near relative jump */
-       case 0xeb:      /* short relative jump */
-               break;
-       case 0x0f:
-               if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
-                       break;
-               return 0;
-       default:
-               if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
-                       break;
-               return 0;
-       }
-       target = (unsigned long)insn->next_byte + insn->immediate.value;
-
-       return (start <= target && target <= start + len);
-}
-
-/* Decode whole function to ensure any instructions don't jump into target */
-static int __kprobes can_optimize(unsigned long paddr)
-{
-       unsigned long addr, size = 0, offset = 0;
-       struct insn insn;
-       kprobe_opcode_t buf[MAX_INSN_SIZE];
-
-       /* Lookup symbol including addr */
-       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
-               return 0;
-
-       /*
-        * Do not optimize in the entry code due to the unstable
-        * stack handling.
-        */
-       if ((paddr >= (unsigned long)__entry_text_start) &&
-           (paddr <  (unsigned long)__entry_text_end))
-               return 0;
-
-       /* Check there is enough space for a relative jump. */
-       if (size - offset < RELATIVEJUMP_SIZE)
-               return 0;
-
-       /* Decode instructions */
-       addr = paddr - offset;
-       while (addr < paddr - offset + size) { /* Decode until function end */
-               if (search_exception_tables(addr))
-                       /*
-                        * Since some fixup code will jumps into this function,
-                        * we can't optimize kprobe in this function.
-                        */
-                       return 0;
-               kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
-               insn_get_length(&insn);
-               /* Another subsystem puts a breakpoint */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
-                       return 0;
-               /* Recover address */
-               insn.kaddr = (void *)addr;
-               insn.next_byte = (void *)(addr + insn.length);
-               /* Check any instructions don't jump into target */
-               if (insn_is_indirect_jump(&insn) ||
-                   insn_jump_into_range(&insn, paddr + INT3_SIZE,
-                                        RELATIVE_ADDR_SIZE))
-                       return 0;
-               addr += insn.length;
-       }
-
-       return 1;
-}
-
-/* Check optimized_kprobe can actually be optimized. */
-int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
-{
-       int i;
-       struct kprobe *p;
-
-       for (i = 1; i < op->optinsn.size; i++) {
-               p = get_kprobe(op->kp.addr + i);
-               if (p && !kprobe_disabled(p))
-                       return -EEXIST;
-       }
-
-       return 0;
-}
-
-/* Check the addr is within the optimized instructions. */
-int __kprobes
-arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
-{
-       return ((unsigned long)op->kp.addr <= addr &&
-               (unsigned long)op->kp.addr + op->optinsn.size > addr);
-}
-
-/* Free optimized instruction slot */
-static __kprobes
-void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
-{
-       if (op->optinsn.insn) {
-               free_optinsn_slot(op->optinsn.insn, dirty);
-               op->optinsn.insn = NULL;
-               op->optinsn.size = 0;
-       }
-}
-
-void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
-{
-       __arch_remove_optimized_kprobe(op, 1);
-}
-
-/*
- * Copy replacing target instructions
- * Target instructions MUST be relocatable (checked inside)
- * This is called when new aggr(opt)probe is allocated or reused.
- */
-int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
-{
-       u8 *buf;
-       int ret;
-       long rel;
-
-       if (!can_optimize((unsigned long)op->kp.addr))
-               return -EILSEQ;
-
-       op->optinsn.insn = get_optinsn_slot();
-       if (!op->optinsn.insn)
-               return -ENOMEM;
-
-       /*
-        * Verify if the address gap is in 2GB range, because this uses
-        * a relative jump.
-        */
-       rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
-       if (abs(rel) > 0x7fffffff)
-               return -ERANGE;
-
-       buf = (u8 *)op->optinsn.insn;
-
-       /* Copy instructions into the out-of-line buffer */
-       ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
-       if (ret < 0) {
-               __arch_remove_optimized_kprobe(op, 0);
-               return ret;
-       }
-       op->optinsn.size = ret;
-
-       /* Copy arch-dep-instance from template */
-       memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
-
-       /* Set probe information */
-       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
-
-       /* Set probe function call */
-       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
-
-       /* Set returning jmp instruction at the tail of out-of-line buffer */
-       synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
-                          (u8 *)op->kp.addr + op->optinsn.size);
-
-       flush_icache_range((unsigned long) buf,
-                          (unsigned long) buf + TMPL_END_IDX +
-                          op->optinsn.size + RELATIVEJUMP_SIZE);
-       return 0;
-}
-
-#define MAX_OPTIMIZE_PROBES 256
-static struct text_poke_param *jump_poke_params;
-static struct jump_poke_buffer {
-       u8 buf[RELATIVEJUMP_SIZE];
-} *jump_poke_bufs;
-
-static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
-                                           u8 *insn_buf,
-                                           struct optimized_kprobe *op)
-{
-       s32 rel = (s32)((long)op->optinsn.insn -
-                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
-
-       /* Backup instructions which will be replaced by jump address */
-       memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-              RELATIVE_ADDR_SIZE);
-
-       insn_buf[0] = RELATIVEJUMP_OPCODE;
-       *(s32 *)(&insn_buf[1]) = rel;
-
-       tprm->addr = op->kp.addr;
-       tprm->opcode = insn_buf;
-       tprm->len = RELATIVEJUMP_SIZE;
-}
-
-/*
- * Replace breakpoints (int3) with relative jumps.
- * Caller must call with locking kprobe_mutex and text_mutex.
- */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
-{
-       struct optimized_kprobe *op, *tmp;
-       int c = 0;
-
-       list_for_each_entry_safe(op, tmp, oplist, list) {
-               WARN_ON(kprobe_disabled(&op->kp));
-               /* Setup param */
-               setup_optimize_kprobe(&jump_poke_params[c],
-                                     jump_poke_bufs[c].buf, op);
-               list_del_init(&op->list);
-               if (++c >= MAX_OPTIMIZE_PROBES)
-                       break;
-       }
-
-       /*
-        * text_poke_smp doesn't support NMI/MCE code modifying.
-        * However, since kprobes itself also doesn't support NMI/MCE
-        * code probing, it's not a problem.
-        */
-       text_poke_smp_batch(jump_poke_params, c);
-}
-
-static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
-                                             u8 *insn_buf,
-                                             struct optimized_kprobe *op)
-{
-       /* Set int3 to first byte for kprobes */
-       insn_buf[0] = BREAKPOINT_INSTRUCTION;
-       memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-
-       tprm->addr = op->kp.addr;
-       tprm->opcode = insn_buf;
-       tprm->len = RELATIVEJUMP_SIZE;
-}
-
-/*
- * Recover original instructions and breakpoints from relative jumps.
- * Caller must call with locking kprobe_mutex.
- */
-extern void arch_unoptimize_kprobes(struct list_head *oplist,
-                                   struct list_head *done_list)
-{
-       struct optimized_kprobe *op, *tmp;
-       int c = 0;
-
-       list_for_each_entry_safe(op, tmp, oplist, list) {
-               /* Setup param */
-               setup_unoptimize_kprobe(&jump_poke_params[c],
-                                       jump_poke_bufs[c].buf, op);
-               list_move(&op->list, done_list);
-               if (++c >= MAX_OPTIMIZE_PROBES)
-                       break;
-       }
-
-       /*
-        * text_poke_smp doesn't support NMI/MCE code modifying.
-        * However, since kprobes itself also doesn't support NMI/MCE
-        * code probing, it's not a problem.
-        */
-       text_poke_smp_batch(jump_poke_params, c);
-}
-
-/* Replace a relative jump with a breakpoint (int3).  */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
-{
-       u8 buf[RELATIVEJUMP_SIZE];
-
-       /* Set int3 to first byte for kprobes */
-       buf[0] = BREAKPOINT_INSTRUCTION;
-       memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-       text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
-}
-
-int  __kprobes
-setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
-{
-       struct optimized_kprobe *op;
-
-       if (p->flags & KPROBE_FLAG_OPTIMIZED) {
-               /* This kprobe is really able to run optimized path. */
-               op = container_of(p, struct optimized_kprobe, kp);
-               /* Detour through copied instructions */
-               regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
-               if (!reenter)
-                       reset_current_kprobe();
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
-int __kprobes arch_init_optprobes(void)
-{
-       /* Allocate code buffer and parameter array */
-       jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
-                                MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-       if (!jump_poke_bufs)
-               return -ENOMEM;
-
-       jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
-                                  MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-       if (!jump_poke_params) {
-               kfree(jump_poke_bufs);
-               jump_poke_bufs = NULL;
-               return -ENOMEM;
-       }
-
-       return 0;
-}
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
deleted file mode 100644 (file)
index 57916c0..0000000
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- *  Kernel Probes (KProbes)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- *
- * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
- *             Probes initial implementation ( includes contributions from
- *             Rusty Russell).
- * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
- *             interface to access function arguments.
- * 2004-Oct    Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
- *             <prasanna@in.ibm.com> adapted for x86_64 from i386.
- * 2005-Mar    Roland McGrath <roland@redhat.com>
- *             Fixed to handle %rip-relative addressing mode correctly.
- * 2005-May    Hien Nguyen <hien@us.ibm.com>, Jim Keniston
- *             <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
- *             <prasanna@in.ibm.com> added function-return probes.
- * 2005-May    Rusty Lynch <rusty.lynch@intel.com>
- *             Added function return probes functionality
- * 2006-Feb    Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
- *             kprobe-booster and kretprobe-booster for i386.
- * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
- *             and kretprobe-booster for x86-64
- * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
- *             <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
- *             unified x86 kprobes code.
- */
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/hardirq.h>
-#include <linux/preempt.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/kallsyms.h>
-#include <linux/ftrace.h>
-
-#include <asm/cacheflush.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/insn.h>
-#include <asm/debugreg.h>
-
-#include "kprobes-common.h"
-
-void jprobe_return_end(void);
-
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
-
-#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
-       (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
-         (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
-         (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
-         (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
-        << (row % 32))
-       /*
-        * Undefined/reserved opcodes, conditional jump, Opcode Extension
-        * Groups, and some special opcodes can not boost.
-        * This is non-const and volatile to keep gcc from statically
-        * optimizing it out, as variable_test_bit makes gcc think only
-        * *(unsigned long*) is used. 
-        */
-static volatile u32 twobyte_is_boostable[256 / 32] = {
-       /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-       /*      ----------------------------------------------          */
-       W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
-       W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
-       W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
-       W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
-       W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
-       W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
-       W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
-       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-       W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
-       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
-       W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-       W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
-       W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
-       W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
-       /*      -----------------------------------------------         */
-       /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-};
-#undef W
-
-struct kretprobe_blackpoint kretprobe_blacklist[] = {
-       {"__switch_to", }, /* This function switches only current task, but
-                             doesn't switch kernel stack.*/
-       {NULL, NULL}    /* Terminator */
-};
-
-const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
-
-static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
-{
-       struct __arch_relative_insn {
-               u8 op;
-               s32 raddr;
-       } __attribute__((packed)) *insn;
-
-       insn = (struct __arch_relative_insn *)from;
-       insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
-       insn->op = op;
-}
-
-/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-void __kprobes synthesize_reljump(void *from, void *to)
-{
-       __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
-}
-
-/* Insert a call instruction at address 'from', which calls address 'to'.*/
-void __kprobes synthesize_relcall(void *from, void *to)
-{
-       __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
-}
-
-/*
- * Skip the prefixes of the instruction.
- */
-static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
-{
-       insn_attr_t attr;
-
-       attr = inat_get_opcode_attribute((insn_byte_t)*insn);
-       while (inat_is_legacy_prefix(attr)) {
-               insn++;
-               attr = inat_get_opcode_attribute((insn_byte_t)*insn);
-       }
-#ifdef CONFIG_X86_64
-       if (inat_is_rex_prefix(attr))
-               insn++;
-#endif
-       return insn;
-}
-
-/*
- * Returns non-zero if opcode is boostable.
- * RIP relative instructions are adjusted at copying time in 64 bits mode
- */
-int __kprobes can_boost(kprobe_opcode_t *opcodes)
-{
-       kprobe_opcode_t opcode;
-       kprobe_opcode_t *orig_opcodes = opcodes;
-
-       if (search_exception_tables((unsigned long)opcodes))
-               return 0;       /* Page fault may occur on this address. */
-
-retry:
-       if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-               return 0;
-       opcode = *(opcodes++);
-
-       /* 2nd-byte opcode */
-       if (opcode == 0x0f) {
-               if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-                       return 0;
-               return test_bit(*opcodes,
-                               (unsigned long *)twobyte_is_boostable);
-       }
-
-       switch (opcode & 0xf0) {
-#ifdef CONFIG_X86_64
-       case 0x40:
-               goto retry; /* REX prefix is boostable */
-#endif
-       case 0x60:
-               if (0x63 < opcode && opcode < 0x67)
-                       goto retry; /* prefixes */
-               /* can't boost Address-size override and bound */
-               return (opcode != 0x62 && opcode != 0x67);
-       case 0x70:
-               return 0; /* can't boost conditional jump */
-       case 0xc0:
-               /* can't boost software-interruptions */
-               return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
-       case 0xd0:
-               /* can boost AA* and XLAT */
-               return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
-       case 0xe0:
-               /* can boost in/out and absolute jmps */
-               return ((opcode & 0x04) || opcode == 0xea);
-       case 0xf0:
-               if ((opcode & 0x0c) == 0 && opcode != 0xf1)
-                       goto retry; /* lock/rep(ne) prefix */
-               /* clear and set flags are boostable */
-               return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
-       default:
-               /* segment override prefixes are boostable */
-               if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
-                       goto retry; /* prefixes */
-               /* CS override prefix and call are not boostable */
-               return (opcode != 0x2e && opcode != 0x9a);
-       }
-}
-
-static unsigned long
-__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
-{
-       struct kprobe *kp;
-
-       kp = get_kprobe((void *)addr);
-       /* There is no probe, return original address */
-       if (!kp)
-               return addr;
-
-       /*
-        *  Basically, kp->ainsn.insn has an original instruction.
-        *  However, RIP-relative instruction can not do single-stepping
-        *  at different place, __copy_instruction() tweaks the displacement of
-        *  that instruction. In that case, we can't recover the instruction
-        *  from the kp->ainsn.insn.
-        *
-        *  On the other hand, kp->opcode has a copy of the first byte of
-        *  the probed instruction, which is overwritten by int3. And
-        *  the instruction at kp->addr is not modified by kprobes except
-        *  for the first byte, we can recover the original instruction
-        *  from it and kp->opcode.
-        */
-       memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       buf[0] = kp->opcode;
-       return (unsigned long)buf;
-}
-
-/*
- * Recover the probed instruction at addr for further analysis.
- * Caller must lock kprobes by kprobe_mutex, or disable preemption
- * for preventing to release referencing kprobes.
- */
-unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
-{
-       unsigned long __addr;
-
-       __addr = __recover_optprobed_insn(buf, addr);
-       if (__addr != addr)
-               return __addr;
-
-       return __recover_probed_insn(buf, addr);
-}
-
-/* Check if paddr is at an instruction boundary */
-static int __kprobes can_probe(unsigned long paddr)
-{
-       unsigned long addr, __addr, offset = 0;
-       struct insn insn;
-       kprobe_opcode_t buf[MAX_INSN_SIZE];
-
-       if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
-               return 0;
-
-       /* Decode instructions */
-       addr = paddr - offset;
-       while (addr < paddr) {
-               /*
-                * Check if the instruction has been modified by another
-                * kprobe, in which case we replace the breakpoint by the
-                * original instruction in our buffer.
-                * Also, jump optimization will change the breakpoint to
-                * relative-jump. Since the relative-jump itself is
-                * normally used, we just go through if there is no kprobe.
-                */
-               __addr = recover_probed_instruction(buf, addr);
-               kernel_insn_init(&insn, (void *)__addr);
-               insn_get_length(&insn);
-
-               /*
-                * Another debugging subsystem might insert this breakpoint.
-                * In that case, we can't recover it.
-                */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
-                       return 0;
-               addr += insn.length;
-       }
-
-       return (addr == paddr);
-}
-
-/*
- * Returns non-zero if opcode modifies the interrupt flag.
- */
-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
-{
-       /* Skip prefixes */
-       insn = skip_prefixes(insn);
-
-       switch (*insn) {
-       case 0xfa:              /* cli */
-       case 0xfb:              /* sti */
-       case 0xcf:              /* iret/iretd */
-       case 0x9d:              /* popf/popfd */
-               return 1;
-       }
-
-       return 0;
-}
-
-/*
- * Copy an instruction and adjust the displacement if the instruction
- * uses the %rip-relative addressing mode.
- * If it does, Return the address of the 32-bit displacement word.
- * If not, return null.
- * Only applicable to 64-bit x86.
- */
-int __kprobes __copy_instruction(u8 *dest, u8 *src)
-{
-       struct insn insn;
-       kprobe_opcode_t buf[MAX_INSN_SIZE];
-
-       kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
-       insn_get_length(&insn);
-       /* Another subsystem puts a breakpoint, failed to recover */
-       if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
-               return 0;
-       memcpy(dest, insn.kaddr, insn.length);
-
-#ifdef CONFIG_X86_64
-       if (insn_rip_relative(&insn)) {
-               s64 newdisp;
-               u8 *disp;
-               kernel_insn_init(&insn, dest);
-               insn_get_displacement(&insn);
-               /*
-                * The copied instruction uses the %rip-relative addressing
-                * mode.  Adjust the displacement for the difference between
-                * the original location of this instruction and the location
-                * of the copy that will actually be run.  The tricky bit here
-                * is making sure that the sign extension happens correctly in
-                * this calculation, since we need a signed 32-bit result to
-                * be sign-extended to 64 bits when it's added to the %rip
-                * value and yield the same 64-bit result that the sign-
-                * extension of the original signed 32-bit displacement would
-                * have given.
-                */
-               newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
-               BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
-               disp = (u8 *) dest + insn_offset_displacement(&insn);
-               *(s32 *) disp = (s32) newdisp;
-       }
-#endif
-       return insn.length;
-}
-
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
-       /* Copy an instruction with recovering if other optprobe modifies it.*/
-       __copy_instruction(p->ainsn.insn, p->addr);
-
-       /*
-        * __copy_instruction can modify the displacement of the instruction,
-        * but it doesn't affect boostable check.
-        */
-       if (can_boost(p->ainsn.insn))
-               p->ainsn.boostable = 0;
-       else
-               p->ainsn.boostable = -1;
-
-       /* Also, displacement change doesn't affect the first byte */
-       p->opcode = p->ainsn.insn[0];
-}
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
-       if (alternatives_text_reserved(p->addr, p->addr))
-               return -EINVAL;
-
-       if (!can_probe((unsigned long)p->addr))
-               return -EILSEQ;
-       /* insn: must be on special executable page on x86. */
-       p->ainsn.insn = get_insn_slot();
-       if (!p->ainsn.insn)
-               return -ENOMEM;
-       arch_copy_kprobe(p);
-       return 0;
-}
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
-       text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
-{
-       text_poke(p->addr, &p->opcode, 1);
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-       if (p->ainsn.insn) {
-               free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-               p->ainsn.insn = NULL;
-       }
-}
-
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
-       kcb->prev_kprobe.kp = kprobe_running();
-       kcb->prev_kprobe.status = kcb->kprobe_status;
-       kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
-       kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
-}
-
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
-       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
-       kcb->kprobe_status = kcb->prev_kprobe.status;
-       kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
-       kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
-                               struct kprobe_ctlblk *kcb)
-{
-       __this_cpu_write(current_kprobe, p);
-       kcb->kprobe_saved_flags = kcb->kprobe_old_flags
-               = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-       if (is_IF_modifier(p->ainsn.insn))
-               kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
-}
-
-static void __kprobes clear_btf(void)
-{
-       if (test_thread_flag(TIF_BLOCKSTEP)) {
-               unsigned long debugctl = get_debugctlmsr();
-
-               debugctl &= ~DEBUGCTLMSR_BTF;
-               update_debugctlmsr(debugctl);
-       }
-}
-
-static void __kprobes restore_btf(void)
-{
-       if (test_thread_flag(TIF_BLOCKSTEP)) {
-               unsigned long debugctl = get_debugctlmsr();
-
-               debugctl |= DEBUGCTLMSR_BTF;
-               update_debugctlmsr(debugctl);
-       }
-}
-
-void __kprobes
-arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-       unsigned long *sara = stack_addr(regs);
-
-       ri->ret_addr = (kprobe_opcode_t *) *sara;
-
-       /* Replace the return addr with trampoline addr */
-       *sara = (unsigned long) &kretprobe_trampoline;
-}
-
-static void __kprobes
-setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
-{
-       if (setup_detour_execution(p, regs, reenter))
-               return;
-
-#if !defined(CONFIG_PREEMPT)
-       if (p->ainsn.boostable == 1 && !p->post_handler) {
-               /* Boost up -- we can execute copied instructions directly */
-               if (!reenter)
-                       reset_current_kprobe();
-               /*
-                * Reentering boosted probe doesn't reset current_kprobe,
-                * nor set current_kprobe, because it doesn't use single
-                * stepping.
-                */
-               regs->ip = (unsigned long)p->ainsn.insn;
-               preempt_enable_no_resched();
-               return;
-       }
-#endif
-       if (reenter) {
-               save_previous_kprobe(kcb);
-               set_current_kprobe(p, regs, kcb);
-               kcb->kprobe_status = KPROBE_REENTER;
-       } else
-               kcb->kprobe_status = KPROBE_HIT_SS;
-       /* Prepare real single stepping */
-       clear_btf();
-       regs->flags |= X86_EFLAGS_TF;
-       regs->flags &= ~X86_EFLAGS_IF;
-       /* single step inline if the instruction is an int3 */
-       if (p->opcode == BREAKPOINT_INSTRUCTION)
-               regs->ip = (unsigned long)p->addr;
-       else
-               regs->ip = (unsigned long)p->ainsn.insn;
-}
-
-/*
- * We have reentered the kprobe_handler(), since another probe was hit while
- * within the handler. We save the original kprobes variables and just single
- * step on the instruction of the new probe without calling any user handlers.
- */
-static int __kprobes
-reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
-{
-       switch (kcb->kprobe_status) {
-       case KPROBE_HIT_SSDONE:
-       case KPROBE_HIT_ACTIVE:
-               kprobes_inc_nmissed_count(p);
-               setup_singlestep(p, regs, kcb, 1);
-               break;
-       case KPROBE_HIT_SS:
-               /* A probe has been hit in the codepath leading up to, or just
-                * after, single-stepping of a probed instruction. This entire
-                * codepath should strictly reside in .kprobes.text section.
-                * Raise a BUG or we'll continue in an endless reentering loop
-                * and eventually a stack overflow.
-                */
-               printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
-                      p->addr);
-               dump_kprobe(p);
-               BUG();
-       default:
-               /* impossible cases */
-               WARN_ON(1);
-               return 0;
-       }
-
-       return 1;
-}
-
-#ifdef KPROBES_CAN_USE_FTRACE
-static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                     struct kprobe_ctlblk *kcb)
-{
-       /*
-        * Emulate singlestep (and also recover regs->ip)
-        * as if there is a 5byte nop
-        */
-       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-}
-#endif
-
-/*
- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-static int __kprobes kprobe_handler(struct pt_regs *regs)
-{
-       kprobe_opcode_t *addr;
-       struct kprobe *p;
-       struct kprobe_ctlblk *kcb;
-
-       addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
-       /*
-        * We don't want to be preempted for the entire
-        * duration of kprobe processing. We conditionally
-        * re-enable preemption at the end of this function,
-        * and also in reenter_kprobe() and setup_singlestep().
-        */
-       preempt_disable();
-
-       kcb = get_kprobe_ctlblk();
-       p = get_kprobe(addr);
-
-       if (p) {
-               if (kprobe_running()) {
-                       if (reenter_kprobe(p, regs, kcb))
-                               return 1;
-               } else {
-                       set_current_kprobe(p, regs, kcb);
-                       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
-                       /*
-                        * If we have no pre-handler or it returned 0, we
-                        * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry
-                        * for jprobe processing, so get out doing nothing
-                        * more here.
-                        */
-                       if (!p->pre_handler || !p->pre_handler(p, regs))
-                               setup_singlestep(p, regs, kcb, 0);
-                       return 1;
-               }
-       } else if (*addr != BREAKPOINT_INSTRUCTION) {
-               /*
-                * The breakpoint instruction was removed right
-                * after we hit it.  Another cpu has removed
-                * either a probepoint or a debugger breakpoint
-                * at this address.  In either case, no further
-                * handling of this interrupt is appropriate.
-                * Back up over the (now missing) int3 and run
-                * the original instruction.
-                */
-               regs->ip = (unsigned long)addr;
-               preempt_enable_no_resched();
-               return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-#ifdef KPROBES_CAN_USE_FTRACE
-                       if (kprobe_ftrace(p)) {
-                               skip_singlestep(p, regs, kcb);
-                               return 1;
-                       }
-#endif
-                       setup_singlestep(p, regs, kcb, 0);
-                       return 1;
-               }
-       } /* else: not a kprobe fault; let the kernel handle it */
-
-       preempt_enable_no_resched();
-       return 0;
-}
-
-/*
- * When a retprobed function returns, this code saves registers and
- * calls trampoline_handler() runs, which calls the kretprobe's handler.
- */
-static void __used __kprobes kretprobe_trampoline_holder(void)
-{
-       asm volatile (
-                       ".global kretprobe_trampoline\n"
-                       "kretprobe_trampoline: \n"
-#ifdef CONFIG_X86_64
-                       /* We don't bother saving the ss register */
-                       "       pushq %rsp\n"
-                       "       pushfq\n"
-                       SAVE_REGS_STRING
-                       "       movq %rsp, %rdi\n"
-                       "       call trampoline_handler\n"
-                       /* Replace saved sp with true return address. */
-                       "       movq %rax, 152(%rsp)\n"
-                       RESTORE_REGS_STRING
-                       "       popfq\n"
-#else
-                       "       pushf\n"
-                       SAVE_REGS_STRING
-                       "       movl %esp, %eax\n"
-                       "       call trampoline_handler\n"
-                       /* Move flags to cs */
-                       "       movl 56(%esp), %edx\n"
-                       "       movl %edx, 52(%esp)\n"
-                       /* Replace saved flags with true return address. */
-                       "       movl %eax, 56(%esp)\n"
-                       RESTORE_REGS_STRING
-                       "       popf\n"
-#endif
-                       "       ret\n");
-}
-
-/*
- * Called from kretprobe_trampoline
- */
-static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
-{
-       struct kretprobe_instance *ri = NULL;
-       struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
-       unsigned long flags, orig_ret_address = 0;
-       unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-       kprobe_opcode_t *correct_ret_addr = NULL;
-
-       INIT_HLIST_HEAD(&empty_rp);
-       kretprobe_hash_lock(current, &head, &flags);
-       /* fixup registers */
-#ifdef CONFIG_X86_64
-       regs->cs = __KERNEL_CS;
-#else
-       regs->cs = __KERNEL_CS | get_kernel_rpl();
-       regs->gs = 0;
-#endif
-       regs->ip = trampoline_address;
-       regs->orig_ax = ~0UL;
-
-       /*
-        * It is possible to have multiple instances associated with a given
-        * task either because multiple functions in the call path have
-        * return probes installed on them, and/or more than one
-        * return probe was registered for a target function.
-        *
-        * We can handle this because:
-        *     - instances are always pushed into the head of the list
-        *     - when multiple return probes are registered for the same
-        *       function, the (chronologically) first instance's ret_addr
-        *       will be the real return address, and all the rest will
-        *       point to kretprobe_trampoline.
-        */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-               if (ri->task != current)
-                       /* another task is sharing our hash bucket */
-                       continue;
-
-               orig_ret_address = (unsigned long)ri->ret_addr;
-
-               if (orig_ret_address != trampoline_address)
-                       /*
-                        * This is the real return address. Any other
-                        * instances associated with this task are for
-                        * other calls deeper on the call stack
-                        */
-                       break;
-       }
-
-       kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
-       correct_ret_addr = ri->ret_addr;
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-               if (ri->task != current)
-                       /* another task is sharing our hash bucket */
-                       continue;
-
-               orig_ret_address = (unsigned long)ri->ret_addr;
-               if (ri->rp && ri->rp->handler) {
-                       __this_cpu_write(current_kprobe, &ri->rp->kp);
-                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-                       ri->ret_addr = correct_ret_addr;
-                       ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, NULL);
-               }
-
-               recycle_rp_inst(ri, &empty_rp);
-
-               if (orig_ret_address != trampoline_address)
-                       /*
-                        * This is the real return address. Any other
-                        * instances associated with this task are for
-                        * other calls deeper on the call stack
-                        */
-                       break;
-       }
-
-       kretprobe_hash_unlock(current, &flags);
-
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
-               hlist_del(&ri->hlist);
-               kfree(ri);
-       }
-       return (void *)orig_ret_address;
-}
-
-/*
- * Called after single-stepping.  p->addr is the address of the
- * instruction whose first byte has been replaced by the "int 3"
- * instruction.  To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction.  The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * interrupt.  We have to fix up the stack as follows:
- *
- * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new ip is relative to the copied instruction.  We need to make
- * it relative to the original instruction.
- *
- * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed flags, and may need to be cleared.
- *
- * 2) If the single-stepped instruction was a call, the return address
- * that is atop the stack is the address following the copied instruction.
- * We need to make it the address following the original instruction.
- *
- * If this is the first time we've single-stepped the instruction at
- * this probepoint, and the instruction is boostable, boost it: add a
- * jump instruction after the copied instruction, that jumps to the next
- * instruction after the probepoint.
- */
-static void __kprobes
-resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
-{
-       unsigned long *tos = stack_addr(regs);
-       unsigned long copy_ip = (unsigned long)p->ainsn.insn;
-       unsigned long orig_ip = (unsigned long)p->addr;
-       kprobe_opcode_t *insn = p->ainsn.insn;
-
-       /* Skip prefixes */
-       insn = skip_prefixes(insn);
-
-       regs->flags &= ~X86_EFLAGS_TF;
-       switch (*insn) {
-       case 0x9c:      /* pushfl */
-               *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
-               *tos |= kcb->kprobe_old_flags;
-               break;
-       case 0xc2:      /* iret/ret/lret */
-       case 0xc3:
-       case 0xca:
-       case 0xcb:
-       case 0xcf:
-       case 0xea:      /* jmp absolute -- ip is correct */
-               /* ip is already adjusted, no more changes required */
-               p->ainsn.boostable = 1;
-               goto no_change;
-       case 0xe8:      /* call relative - Fix return addr */
-               *tos = orig_ip + (*tos - copy_ip);
-               break;
-#ifdef CONFIG_X86_32
-       case 0x9a:      /* call absolute -- same as call absolute, indirect */
-               *tos = orig_ip + (*tos - copy_ip);
-               goto no_change;
-#endif
-       case 0xff:
-               if ((insn[1] & 0x30) == 0x10) {
-                       /*
-                        * call absolute, indirect
-                        * Fix return addr; ip is correct.
-                        * But this is not boostable
-                        */
-                       *tos = orig_ip + (*tos - copy_ip);
-                       goto no_change;
-               } else if (((insn[1] & 0x31) == 0x20) ||
-                          ((insn[1] & 0x31) == 0x21)) {
-                       /*
-                        * jmp near and far, absolute indirect
-                        * ip is correct. And this is boostable
-                        */
-                       p->ainsn.boostable = 1;
-                       goto no_change;
-               }
-       default:
-               break;
-       }
-
-       if (p->ainsn.boostable == 0) {
-               if ((regs->ip > copy_ip) &&
-                   (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
-                       /*
-                        * These instructions can be executed directly if it
-                        * jumps back to correct address.
-                        */
-                       synthesize_reljump((void *)regs->ip,
-                               (void *)orig_ip + (regs->ip - copy_ip));
-                       p->ainsn.boostable = 1;
-               } else {
-                       p->ainsn.boostable = -1;
-               }
-       }
-
-       regs->ip += orig_ip - copy_ip;
-
-no_change:
-       restore_btf();
-}
-
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
-{
-       struct kprobe *cur = kprobe_running();
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (!cur)
-               return 0;
-
-       resume_execution(cur, regs, kcb);
-       regs->flags |= kcb->kprobe_saved_flags;
-
-       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               cur->post_handler(cur, regs, 0);
-       }
-
-       /* Restore back the original saved kprobes variables and continue. */
-       if (kcb->kprobe_status == KPROBE_REENTER) {
-               restore_previous_kprobe(kcb);
-               goto out;
-       }
-       reset_current_kprobe();
-out:
-       preempt_enable_no_resched();
-
-       /*
-        * if somebody else is singlestepping across a probe point, flags
-        * will have TF set, in which case, continue the remaining processing
-        * of do_debug, as if this is not a probe hit.
-        */
-       if (regs->flags & X86_EFLAGS_TF)
-               return 0;
-
-       return 1;
-}
-
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
-       struct kprobe *cur = kprobe_running();
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       switch (kcb->kprobe_status) {
-       case KPROBE_HIT_SS:
-       case KPROBE_REENTER:
-               /*
-                * We are here because the instruction being single
-                * stepped caused a page fault. We reset the current
-                * kprobe and the ip points back to the probe address
-                * and allow the page fault handler to continue as a
-                * normal page fault.
-                */
-               regs->ip = (unsigned long)cur->addr;
-               regs->flags |= kcb->kprobe_old_flags;
-               if (kcb->kprobe_status == KPROBE_REENTER)
-                       restore_previous_kprobe(kcb);
-               else
-                       reset_current_kprobe();
-               preempt_enable_no_resched();
-               break;
-       case KPROBE_HIT_ACTIVE:
-       case KPROBE_HIT_SSDONE:
-               /*
-                * We increment the nmissed count for accounting,
-                * we can also use npre/npostfault count for accounting
-                * these specific fault cases.
-                */
-               kprobes_inc_nmissed_count(cur);
-
-               /*
-                * We come here because instructions in the pre/post
-                * handler caused the page_fault, this could happen
-                * if handler tries to access user space by
-                * copy_from_user(), get_user() etc. Let the
-                * user-specified handler try to fix it first.
-                */
-               if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-                       return 1;
-
-               /*
-                * In case the user-specified fault handler returned
-                * zero, try to fix up.
-                */
-               if (fixup_exception(regs))
-                       return 1;
-
-               /*
-                * fixup routine could not handle it,
-                * Let do_page_fault() fix it.
-                */
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes
-kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
-{
-       struct die_args *args = data;
-       int ret = NOTIFY_DONE;
-
-       if (args->regs && user_mode_vm(args->regs))
-               return ret;
-
-       switch (val) {
-       case DIE_INT3:
-               if (kprobe_handler(args->regs))
-                       ret = NOTIFY_STOP;
-               break;
-       case DIE_DEBUG:
-               if (post_kprobe_handler(args->regs)) {
-                       /*
-                        * Reset the BS bit in dr6 (pointed by args->err) to
-                        * denote completion of processing
-                        */
-                       (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
-                       ret = NOTIFY_STOP;
-               }
-               break;
-       case DIE_GPF:
-               /*
-                * To be potentially processing a kprobe fault and to
-                * trust the result from kprobe_running(), we have
-                * be non-preemptible.
-                */
-               if (!preemptible() && kprobe_running() &&
-                   kprobe_fault_handler(args->regs, args->trapnr))
-                       ret = NOTIFY_STOP;
-               break;
-       default:
-               break;
-       }
-       return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_sp = stack_addr(regs);
-       addr = (unsigned long)(kcb->jprobe_saved_sp);
-
-       /*
-        * As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
-        */
-       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-              MIN_STACK_SIZE(addr));
-       regs->flags &= ~X86_EFLAGS_IF;
-       trace_hardirqs_off();
-       regs->ip = (unsigned long)(jp->entry);
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       asm volatile (
-#ifdef CONFIG_X86_64
-                       "       xchg   %%rbx,%%rsp      \n"
-#else
-                       "       xchgl   %%ebx,%%esp     \n"
-#endif
-                       "       int3                    \n"
-                       "       .globl jprobe_return_end\n"
-                       "       jprobe_return_end:      \n"
-                       "       nop                     \n"::"b"
-                       (kcb->jprobe_saved_sp):"memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       u8 *addr = (u8 *) (regs->ip - 1);
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-
-       if ((addr > (u8 *) jprobe_return) &&
-           (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
-                       struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-                       printk(KERN_ERR
-                              "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), kcb->jprobe_saved_sp);
-                       printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
-                       show_regs(saved_regs);
-                       printk(KERN_ERR "Current registers\n");
-                       show_regs(regs);
-                       BUG();
-               }
-               *regs = kcb->jprobe_saved_regs;
-               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-                      kcb->jprobes_stack,
-                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
-#ifdef KPROBES_CAN_USE_FTRACE
-/* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-                                    struct ftrace_ops *ops, struct pt_regs *regs)
-{
-       struct kprobe *p;
-       struct kprobe_ctlblk *kcb;
-       unsigned long flags;
-
-       /* Disable irq for emulating a breakpoint and avoiding preempt */
-       local_irq_save(flags);
-
-       p = get_kprobe((kprobe_opcode_t *)ip);
-       if (unlikely(!p) || kprobe_disabled(p))
-               goto end;
-
-       kcb = get_kprobe_ctlblk();
-       if (kprobe_running()) {
-               kprobes_inc_nmissed_count(p);
-       } else {
-               /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
-               regs->ip = ip + sizeof(kprobe_opcode_t);
-
-               __this_cpu_write(current_kprobe, p);
-               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               if (!p->pre_handler || !p->pre_handler(p, regs))
-                       skip_singlestep(p, regs, kcb);
-               /*
-                * If pre_handler returns !0, it sets regs->ip and
-                * resets current kprobe.
-                */
-       }
-end:
-       local_irq_restore(flags);
-}
-
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
-       p->ainsn.insn = NULL;
-       p->ainsn.boostable = -1;
-       return 0;
-}
-#endif
-
-int __init arch_init_kprobes(void)
-{
-       return arch_init_optprobes();
-}
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
-       return 0;
-}
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile
new file mode 100644 (file)
index 0000000..0d33169
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for kernel probes
+#
+
+obj-$(CONFIG_KPROBES)          += core.o
+obj-$(CONFIG_OPTPROBES)                += opt.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
new file mode 100644 (file)
index 0000000..2e9d4b5
--- /dev/null
@@ -0,0 +1,113 @@
+#ifndef __X86_KERNEL_KPROBES_COMMON_H
+#define __X86_KERNEL_KPROBES_COMMON_H
+
+/* Kprobes and Optprobes common header */
+
+#ifdef CONFIG_X86_64
+#define SAVE_REGS_STRING                       \
+       /* Skip cs, ip, orig_ax. */             \
+       "       subq $24, %rsp\n"               \
+       "       pushq %rdi\n"                   \
+       "       pushq %rsi\n"                   \
+       "       pushq %rdx\n"                   \
+       "       pushq %rcx\n"                   \
+       "       pushq %rax\n"                   \
+       "       pushq %r8\n"                    \
+       "       pushq %r9\n"                    \
+       "       pushq %r10\n"                   \
+       "       pushq %r11\n"                   \
+       "       pushq %rbx\n"                   \
+       "       pushq %rbp\n"                   \
+       "       pushq %r12\n"                   \
+       "       pushq %r13\n"                   \
+       "       pushq %r14\n"                   \
+       "       pushq %r15\n"
+#define RESTORE_REGS_STRING                    \
+       "       popq %r15\n"                    \
+       "       popq %r14\n"                    \
+       "       popq %r13\n"                    \
+       "       popq %r12\n"                    \
+       "       popq %rbp\n"                    \
+       "       popq %rbx\n"                    \
+       "       popq %r11\n"                    \
+       "       popq %r10\n"                    \
+       "       popq %r9\n"                     \
+       "       popq %r8\n"                     \
+       "       popq %rax\n"                    \
+       "       popq %rcx\n"                    \
+       "       popq %rdx\n"                    \
+       "       popq %rsi\n"                    \
+       "       popq %rdi\n"                    \
+       /* Skip orig_ax, ip, cs */              \
+       "       addq $24, %rsp\n"
+#else
+#define SAVE_REGS_STRING                       \
+       /* Skip cs, ip, orig_ax and gs. */      \
+       "       subl $16, %esp\n"               \
+       "       pushl %fs\n"                    \
+       "       pushl %es\n"                    \
+       "       pushl %ds\n"                    \
+       "       pushl %eax\n"                   \
+       "       pushl %ebp\n"                   \
+       "       pushl %edi\n"                   \
+       "       pushl %esi\n"                   \
+       "       pushl %edx\n"                   \
+       "       pushl %ecx\n"                   \
+       "       pushl %ebx\n"
+#define RESTORE_REGS_STRING                    \
+       "       popl %ebx\n"                    \
+       "       popl %ecx\n"                    \
+       "       popl %edx\n"                    \
+       "       popl %esi\n"                    \
+       "       popl %edi\n"                    \
+       "       popl %ebp\n"                    \
+       "       popl %eax\n"                    \
+       /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
+       "       addl $24, %esp\n"
+#endif
+
+/* Ensure if the instruction can be boostable */
+extern int can_boost(kprobe_opcode_t *instruction);
+/* Recover instruction if given address is probed */
+extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
+                                        unsigned long addr);
+/*
+ * Copy an instruction and adjust the displacement if the instruction
+ * uses the %rip-relative addressing mode.
+ */
+extern int __copy_instruction(u8 *dest, u8 *src);
+
+/* Generate a relative-jump/call instruction */
+extern void synthesize_reljump(void *from, void *to);
+extern void synthesize_relcall(void *from, void *to);
+
+#ifdef CONFIG_OPTPROBES
+extern int arch_init_optprobes(void);
+extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
+extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
+#else  /* !CONFIG_OPTPROBES */
+static inline int arch_init_optprobes(void)
+{
+       return 0;
+}
+static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+{
+       return 0;
+}
+static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+{
+       return addr;
+}
+#endif
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                          struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                                 struct kprobe_ctlblk *kcb)
+{
+       return 0;
+}
+#endif
+#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
new file mode 100644 (file)
index 0000000..e124554
--- /dev/null
@@ -0,0 +1,1064 @@
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *             Probes initial implementation ( includes contributions from
+ *             Rusty Russell).
+ * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
+ *             interface to access function arguments.
+ * 2004-Oct    Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *             <prasanna@in.ibm.com> adapted for x86_64 from i386.
+ * 2005-Mar    Roland McGrath <roland@redhat.com>
+ *             Fixed to handle %rip-relative addressing mode correctly.
+ * 2005-May    Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ *             <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *             <prasanna@in.ibm.com> added function-return probes.
+ * 2005-May    Rusty Lynch <rusty.lynch@intel.com>
+ *             Added function return probes functionality
+ * 2006-Feb    Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
+ *             kprobe-booster and kretprobe-booster for i386.
+ * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
+ *             and kretprobe-booster for x86-64
+ * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
+ *             <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
+ *             unified x86 kprobes code.
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/insn.h>
+#include <asm/debugreg.h>
+
+#include "common.h"
+
+/* NOTE(review): presumably the end label of jprobe_return()'s asm -- confirm */
+void jprobe_return_end(void);
+
+/* Per-cpu kprobe state: the probe currently being handled and its ctlblk */
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+/* Stack pointer of the probed context, taken from the saved pt_regs */
+#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
+
+/* Pack one 16-opcode row of the boostability bitmap below into bits */
+#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
+       (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
+         (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
+         (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
+         (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
+        << (row % 32))
+       /*
+        * Undefined/reserved opcodes, conditional jump, Opcode Extension
+        * Groups, and some special opcodes can not boost.
+        * This is non-const and volatile to keep gcc from statically
+        * optimizing it out, as variable_test_bit makes gcc think only
+        * *(unsigned long*) is used.
+        */
+static volatile u32 twobyte_is_boostable[256 / 32] = {
+       /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
+       /*      ----------------------------------------------          */
+       W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
+       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
+       W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
+       W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+       W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
+       W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
+       W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
+       W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+       W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
+       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+       W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
+       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
+       W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
+       W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
+       W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
+       W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
+       /*      -----------------------------------------------         */
+       /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
+};
+#undef W
+
+/* Functions kretprobes must never be attached to */
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+       {"__switch_to", }, /* This function switches only current task, but
+                             doesn't switch kernel stack.*/
+       {NULL, NULL}    /* Terminator */
+};
+
+/* Number of entries above, including the terminator */
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
+/* Write opcode 'op' plus a rel32 displacement at 'from', targeting 'to' */
+static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
+{
+       struct __arch_relative_insn {
+               u8 op;
+               s32 raddr;
+       } __packed *insn;
+
+       insn = (struct __arch_relative_insn *)from;
+       /* rel32 is relative to the end of the 5-byte instruction */
+       insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+       insn->op = op;
+}
+
+/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+void __kprobes synthesize_reljump(void *from, void *to)
+{
+       __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
+}
+
+/* Insert a call instruction at address 'from', which calls address 'to'.*/
+void __kprobes synthesize_relcall(void *from, void *to)
+{
+       __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+}
+
+/*
+ * Skip the prefixes of the instruction.
+ * Returns a pointer to the first opcode byte after any legacy prefixes
+ * (and, on 64-bit, after a single REX prefix).
+ */
+static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
+{
+       insn_attr_t attr;
+
+       attr = inat_get_opcode_attribute((insn_byte_t)*insn);
+       while (inat_is_legacy_prefix(attr)) {
+               insn++;
+               attr = inat_get_opcode_attribute((insn_byte_t)*insn);
+       }
+#ifdef CONFIG_X86_64
+       /* At most one REX prefix may follow the legacy prefixes */
+       if (inat_is_rex_prefix(attr))
+               insn++;
+#endif
+       return insn;
+}
+
+/*
+ * Returns non-zero if opcode is boostable.
+ * RIP relative instructions are adjusted at copying time in 64 bits mode
+ */
+int __kprobes can_boost(kprobe_opcode_t *opcodes)
+{
+       kprobe_opcode_t opcode;
+       kprobe_opcode_t *orig_opcodes = opcodes;
+
+       if (search_exception_tables((unsigned long)opcodes))
+               return 0;       /* Page fault may occur on this address. */
+
+retry:
+       /* Give up if we ran past the longest possible instruction */
+       if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+               return 0;
+       opcode = *(opcodes++);
+
+       /* 2nd-byte opcode */
+       if (opcode == 0x0f) {
+               if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+                       return 0;
+               /* Look the second opcode byte up in the bitmap above */
+               return test_bit(*opcodes,
+                               (unsigned long *)twobyte_is_boostable);
+       }
+
+       switch (opcode & 0xf0) {
+#ifdef CONFIG_X86_64
+       case 0x40:
+               goto retry; /* REX prefix is boostable */
+#endif
+       case 0x60:
+               if (0x63 < opcode && opcode < 0x67)
+                       goto retry; /* prefixes */
+               /* can't boost Address-size override and bound */
+               return (opcode != 0x62 && opcode != 0x67);
+       case 0x70:
+               return 0; /* can't boost conditional jump */
+       case 0xc0:
+               /* can't boost software-interruptions */
+               return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
+       case 0xd0:
+               /* can boost AA* and XLAT */
+               return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+       case 0xe0:
+               /* can boost in/out and absolute jmps */
+               return ((opcode & 0x04) || opcode == 0xea);
+       case 0xf0:
+               if ((opcode & 0x0c) == 0 && opcode != 0xf1)
+                       goto retry; /* lock/rep(ne) prefix */
+               /* clear and set flags are boostable */
+               return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+       default:
+               /* segment override prefixes are boostable */
+               if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
+                       goto retry; /* prefixes */
+               /* CS override prefix and call are not boostable */
+               return (opcode != 0x2e && opcode != 0x9a);
+       }
+}
+
+/*
+ * Reconstruct the original instruction at addr into buf, undoing the
+ * int3 byte a kprobe planted there.  Returns addr itself if no kprobe
+ * is registered at addr, otherwise the address of buf.
+ */
+static unsigned long
+__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
+{
+       struct kprobe *kp;
+
+       kp = get_kprobe((void *)addr);
+       /* There is no probe, return original address */
+       if (!kp)
+               return addr;
+
+       /*
+        *  Basically, kp->ainsn.insn has an original instruction.
+        *  However, RIP-relative instruction can not do single-stepping
+        *  at different place, __copy_instruction() tweaks the displacement of
+        *  that instruction. In that case, we can't recover the instruction
+        *  from the kp->ainsn.insn.
+        *
+        *  On the other hand, kp->opcode has a copy of the first byte of
+        *  the probed instruction, which is overwritten by int3. And
+        *  the instruction at kp->addr is not modified by kprobes except
+        *  for the first byte, we can recover the original instruction
+        *  from it and kp->opcode.
+        */
+       memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       buf[0] = kp->opcode;
+       return (unsigned long)buf;
+}
+
+/*
+ * Recover the probed instruction at addr for further analysis.
+ * Caller must lock kprobes by kprobe_mutex, or disable preemption
+ * for preventing to release referencing kprobes.
+ * Tries the optprobe recovery first, then the plain-kprobe one.
+ */
+unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
+{
+       unsigned long __addr;
+
+       /* An optprobe replaced the insn; use its recovered copy */
+       __addr = __recover_optprobed_insn(buf, addr);
+       if (__addr != addr)
+               return __addr;
+
+       return __recover_probed_insn(buf, addr);
+}
+
+/*
+ * Check if paddr is at an instruction boundary by decoding forward
+ * from the start of the enclosing symbol until paddr is reached.
+ */
+static int __kprobes can_probe(unsigned long paddr)
+{
+       unsigned long addr, __addr, offset = 0;
+       struct insn insn;
+       kprobe_opcode_t buf[MAX_INSN_SIZE];
+
+       /* Need the symbol's base address to start decoding from */
+       if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
+               return 0;
+
+       /* Decode instructions */
+       addr = paddr - offset;
+       while (addr < paddr) {
+               /*
+                * Check if the instruction has been modified by another
+                * kprobe, in which case we replace the breakpoint by the
+                * original instruction in our buffer.
+                * Also, jump optimization will change the breakpoint to
+                * relative-jump. Since the relative-jump itself is
+                * normally used, we just go through if there is no kprobe.
+                */
+               __addr = recover_probed_instruction(buf, addr);
+               kernel_insn_init(&insn, (void *)__addr);
+               insn_get_length(&insn);
+
+               /*
+                * Another debugging subsystem might insert this breakpoint.
+                * In that case, we can't recover it.
+                */
+               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+                       return 0;
+               addr += insn.length;
+       }
+
+       /* paddr is a boundary iff decoding landed exactly on it */
+       return (addr == paddr);
+}
+
+/*
+ * Returns non-zero if opcode modifies the interrupt flag.
+ */
+static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+{
+       /* Skip prefixes */
+       insn = skip_prefixes(insn);
+
+       switch (*insn) {
+       case 0xfa:              /* cli */
+       case 0xfb:              /* sti */
+       case 0xcf:              /* iret/iretd */
+       case 0x9d:              /* popf/popfd */
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Copy an instruction and adjust the displacement if the instruction
+ * uses the %rip-relative addressing mode (64-bit only).
+ * Returns the length of the copied instruction, or 0 if the original
+ * instruction could not be recovered (another debugger's breakpoint).
+ */
+int __kprobes __copy_instruction(u8 *dest, u8 *src)
+{
+       struct insn insn;
+       kprobe_opcode_t buf[MAX_INSN_SIZE];
+
+       /* Decode the real instruction, undoing any existing probe's int3 */
+       kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
+       insn_get_length(&insn);
+       /* Another subsystem puts a breakpoint, failed to recover */
+       if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               return 0;
+       memcpy(dest, insn.kaddr, insn.length);
+
+#ifdef CONFIG_X86_64
+       if (insn_rip_relative(&insn)) {
+               s64 newdisp;
+               u8 *disp;
+               /* Re-decode the copy to locate its displacement field */
+               kernel_insn_init(&insn, dest);
+               insn_get_displacement(&insn);
+               /*
+                * The copied instruction uses the %rip-relative addressing
+                * mode.  Adjust the displacement for the difference between
+                * the original location of this instruction and the location
+                * of the copy that will actually be run.  The tricky bit here
+                * is making sure that the sign extension happens correctly in
+                * this calculation, since we need a signed 32-bit result to
+                * be sign-extended to 64 bits when it's added to the %rip
+                * value and yield the same 64-bit result that the sign-
+                * extension of the original signed 32-bit displacement would
+                * have given.
+                */
+               newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
+               BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
+               disp = (u8 *) dest + insn_offset_displacement(&insn);
+               *(s32 *) disp = (s32) newdisp;
+       }
+#endif
+       return insn.length;
+}
+
+/* Fill in p->ainsn/p->opcode from the instruction at p->addr */
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+       /* Copy an instruction with recovering if other optprobe modifies it.*/
+       __copy_instruction(p->ainsn.insn, p->addr);
+
+       /*
+        * __copy_instruction can modify the displacement of the instruction,
+        * but it doesn't affect boostable check.
+        */
+       if (can_boost(p->ainsn.insn))
+               p->ainsn.boostable = 0;
+       else
+               p->ainsn.boostable = -1;
+
+       /* Also, displacement change doesn't affect the first byte */
+       p->opcode = p->ainsn.insn[0];
+}
+
+/* Validate the probe site and set up the out-of-line instruction slot */
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+       /* Refuse addresses the alternatives code may still patch */
+       if (alternatives_text_reserved(p->addr, p->addr))
+               return -EINVAL;
+
+       if (!can_probe((unsigned long)p->addr))
+               return -EILSEQ;
+       /* insn: must be on special executable page on x86. */
+       p->ainsn.insn = get_insn_slot();
+       if (!p->ainsn.insn)
+               return -ENOMEM;
+       arch_copy_kprobe(p);
+       return 0;
+}
+
+/* Arm the probe: plant an int3 over the first byte of the instruction */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+       text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+}
+
+/* Disarm the probe: restore the saved original first byte */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+       text_poke(p->addr, &p->opcode, 1);
+}
+
+/* Release the out-of-line instruction slot allocated in arch_prepare_kprobe */
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+       if (p->ainsn.insn) {
+               free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+               p->ainsn.insn = NULL;
+       }
+}
+
+/* Stash the active kprobe state so a reentrant probe can be handled */
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       kcb->prev_kprobe.kp = kprobe_running();
+       kcb->prev_kprobe.status = kcb->kprobe_status;
+       kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
+       kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
+}
+
+/* Undo save_previous_kprobe() after the nested probe finishes */
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+       kcb->kprobe_status = kcb->prev_kprobe.status;
+       kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
+       kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
+}
+
+/* Make p the active probe and record the probed context's TF/IF flags */
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+                               struct kprobe_ctlblk *kcb)
+{
+       __this_cpu_write(current_kprobe, p);
+       kcb->kprobe_saved_flags = kcb->kprobe_old_flags
+               = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+       /* If the insn itself toggles IF, don't restore IF afterwards */
+       if (is_IF_modifier(p->ainsn.insn))
+               kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
+}
+
+/* Turn off branch-trace single-stepping while we hardware-single-step */
+static void __kprobes clear_btf(void)
+{
+       if (test_thread_flag(TIF_BLOCKSTEP)) {
+               unsigned long debugctl = get_debugctlmsr();
+
+               debugctl &= ~DEBUGCTLMSR_BTF;
+               update_debugctlmsr(debugctl);
+       }
+}
+
+/* Re-enable branch-trace single-stepping disabled by clear_btf() */
+static void __kprobes restore_btf(void)
+{
+       if (test_thread_flag(TIF_BLOCKSTEP)) {
+               unsigned long debugctl = get_debugctlmsr();
+
+               debugctl |= DEBUGCTLMSR_BTF;
+               update_debugctlmsr(debugctl);
+       }
+}
+
+/* Hijack the function's return address so it returns into our trampoline */
+void __kprobes
+arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+       unsigned long *sara = stack_addr(regs);
+
+       /* Remember the real return address for trampoline_handler() */
+       ri->ret_addr = (kprobe_opcode_t *) *sara;
+
+       /* Replace the return addr with trampoline addr */
+       *sara = (unsigned long) &kretprobe_trampoline;
+}
+
+/*
+ * Execute the probed instruction: either via optprobe detour, by
+ * "boosting" (jumping straight to the copy), or by a real hardware
+ * single-step of the copy with TF set.
+ */
+static void __kprobes
+setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
+{
+       /* Jump-optimized probe takes over completely if it can */
+       if (setup_detour_execution(p, regs, reenter))
+               return;
+
+#if !defined(CONFIG_PREEMPT)
+       if (p->ainsn.boostable == 1 && !p->post_handler) {
+               /* Boost up -- we can execute copied instructions directly */
+               if (!reenter)
+                       reset_current_kprobe();
+               /*
+                * Reentering boosted probe doesn't reset current_kprobe,
+                * nor set current_kprobe, because it doesn't use single
+                * stepping.
+                */
+               regs->ip = (unsigned long)p->ainsn.insn;
+               preempt_enable_no_resched();
+               return;
+       }
+#endif
+       if (reenter) {
+               save_previous_kprobe(kcb);
+               set_current_kprobe(p, regs, kcb);
+               kcb->kprobe_status = KPROBE_REENTER;
+       } else
+               kcb->kprobe_status = KPROBE_HIT_SS;
+       /* Prepare real single stepping */
+       clear_btf();
+       regs->flags |= X86_EFLAGS_TF;
+       regs->flags &= ~X86_EFLAGS_IF;
+       /* single step inline if the instruction is an int3 */
+       if (p->opcode == BREAKPOINT_INSTRUCTION)
+               regs->ip = (unsigned long)p->addr;
+       else
+               regs->ip = (unsigned long)p->ainsn.insn;
+}
+
+/*
+ * We have reentered the kprobe_handler(), since another probe was hit while
+ * within the handler. We save the original kprobes variables and just single
+ * step on the instruction of the new probe without calling any user handlers.
+ * Returns 1 when the reentry was handled.
+ */
+static int __kprobes
+reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+       switch (kcb->kprobe_status) {
+       case KPROBE_HIT_SSDONE:
+       case KPROBE_HIT_ACTIVE:
+               kprobes_inc_nmissed_count(p);
+               setup_singlestep(p, regs, kcb, 1);
+               break;
+       case KPROBE_HIT_SS:
+               /* A probe has been hit in the codepath leading up to, or just
+                * after, single-stepping of a probed instruction. This entire
+                * codepath should strictly reside in .kprobes.text section.
+                * Raise a BUG or we'll continue in an endless reentering loop
+                * and eventually a stack overflow.
+                */
+               printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
+                      p->addr);
+               dump_kprobe(p);
+               BUG();
+       default:
+               /* impossible cases */
+               WARN_ON(1);
+               return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * int3 breakpoint entry point for kprobes.
+ * Interrupts are disabled on entry as trap3 is an interrupt gate and they
+ * remain disabled throughout this function.
+ * Returns 1 when the trap was consumed by kprobes, 0 otherwise.
+ */
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+       kprobe_opcode_t *addr;
+       struct kprobe *p;
+       struct kprobe_ctlblk *kcb;
+
+       /* int3 already advanced ip past the 1-byte breakpoint; back up */
+       addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+       /*
+        * We don't want to be preempted for the entire
+        * duration of kprobe processing. We conditionally
+        * re-enable preemption at the end of this function,
+        * and also in reenter_kprobe() and setup_singlestep().
+        */
+       preempt_disable();
+
+       kcb = get_kprobe_ctlblk();
+       p = get_kprobe(addr);
+
+       if (p) {
+               if (kprobe_running()) {
+                       if (reenter_kprobe(p, regs, kcb))
+                               return 1;
+               } else {
+                       set_current_kprobe(p, regs, kcb);
+                       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+                       /*
+                        * If we have no pre-handler or it returned 0, we
+                        * continue with normal processing.  If we have a
+                        * pre-handler and it returned non-zero, it prepped
+                        * for calling the break_handler below on re-entry
+                        * for jprobe processing, so get out doing nothing
+                        * more here.
+                        */
+                       if (!p->pre_handler || !p->pre_handler(p, regs))
+                               setup_singlestep(p, regs, kcb, 0);
+                       return 1;
+               }
+       } else if (*addr != BREAKPOINT_INSTRUCTION) {
+               /*
+                * The breakpoint instruction was removed right
+                * after we hit it.  Another cpu has removed
+                * either a probepoint or a debugger breakpoint
+                * at this address.  In either case, no further
+                * handling of this interrupt is appropriate.
+                * Back up over the (now missing) int3 and run
+                * the original instruction.
+                */
+               regs->ip = (unsigned long)addr;
+               preempt_enable_no_resched();
+               return 1;
+       } else if (kprobe_running()) {
+               p = __this_cpu_read(current_kprobe);
+               if (p->break_handler && p->break_handler(p, regs)) {
+                       /* skip_singlestep() may handle it on ftrace sites */
+                       if (!skip_singlestep(p, regs, kcb))
+                               setup_singlestep(p, regs, kcb, 0);
+                       return 1;
+               }
+       } /* else: not a kprobe fault; let the kernel handle it */
+
+       preempt_enable_no_resched();
+       return 0;
+}
+
+/*
+ * When a retprobed function returns, this code saves registers and
+ * calls trampoline_handler(), which in turn invokes the kretprobe's
+ * handler and returns the real return address to jump back to.
+ */
+static void __used __kprobes kretprobe_trampoline_holder(void)
+{
+       asm volatile (
+                       ".global kretprobe_trampoline\n"
+                       "kretprobe_trampoline: \n"
+#ifdef CONFIG_X86_64
+                       /* We don't bother saving the ss register */
+                       "       pushq %rsp\n"
+                       "       pushfq\n"
+                       SAVE_REGS_STRING
+                       "       movq %rsp, %rdi\n"
+                       "       call trampoline_handler\n"
+                       /* Replace saved sp with true return address. */
+                       "       movq %rax, 152(%rsp)\n"
+                       RESTORE_REGS_STRING
+                       "       popfq\n"
+#else
+                       "       pushf\n"
+                       SAVE_REGS_STRING
+                       "       movl %esp, %eax\n"
+                       "       call trampoline_handler\n"
+                       /* Move flags to cs */
+                       "       movl 56(%esp), %edx\n"
+                       "       movl %edx, 52(%esp)\n"
+                       /* Replace saved flags with true return address. */
+                       "       movl %eax, 56(%esp)\n"
+                       RESTORE_REGS_STRING
+                       "       popf\n"
+#endif
+                       /* "return" to the address trampoline_handler gave us */
+                       "       ret\n");
+}
+
+/*
+ * Called from kretprobe_trampoline.
+ * Runs the kretprobe handlers for every instance belonging to the
+ * current task and returns the real (saved) return address.
+ */
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+{
+       struct kretprobe_instance *ri = NULL;
+       struct hlist_head *head, empty_rp;
+       struct hlist_node *node, *tmp;
+       unsigned long flags, orig_ret_address = 0;
+       unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+       kprobe_opcode_t *correct_ret_addr = NULL;
+
+       INIT_HLIST_HEAD(&empty_rp);
+       kretprobe_hash_lock(current, &head, &flags);
+       /* Fix up the partial pt_regs built by the trampoline asm */
+#ifdef CONFIG_X86_64
+       regs->cs = __KERNEL_CS;
+#else
+       regs->cs = __KERNEL_CS | get_kernel_rpl();
+       regs->gs = 0;
+#endif
+       regs->ip = trampoline_address;
+       regs->orig_ax = ~0UL;
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path have
+        * return probes installed on them, and/or more than one
+        * return probe was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always pushed into the head of the list
+        *     - when multiple return probes are registered for the same
+        *       function, the (chronologically) first instance's ret_addr
+        *       will be the real return address, and all the rest will
+        *       point to kretprobe_trampoline.
+        */
+       /* First pass: find the real return address of this call */
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+       correct_ret_addr = ri->ret_addr;
+       /* Second pass: run handlers and recycle the consumed instances */
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+               if (ri->rp && ri->rp->handler) {
+                       __this_cpu_write(current_kprobe, &ri->rp->kp);
+                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+                       ri->ret_addr = correct_ret_addr;
+                       ri->rp->handler(ri, regs);
+                       __this_cpu_write(current_kprobe, NULL);
+               }
+
+               recycle_rp_inst(ri, &empty_rp);
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       kretprobe_hash_unlock(current, &flags);
+
+       /* Free instances that could not be recycled under the lock */
+       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+               hlist_del(&ri->hlist);
+               kfree(ri);
+       }
+       return (void *)orig_ret_address;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "int 3"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * interrupt.  We have to fix up the stack as follows:
+ *
+ * 0) Except in the case of absolute or indirect jump or call instructions,
+ * the new ip is relative to the copied instruction.  We need to make
+ * it relative to the original instruction.
+ *
+ * 1) If the single-stepped instruction was pushfl, then the TF and IF
+ * flags are set in the just-pushed flags, and may need to be cleared.
+ *
+ * 2) If the single-stepped instruction was a call, the return address
+ * that is atop the stack is the address following the copied instruction.
+ * We need to make it the address following the original instruction.
+ *
+ * If this is the first time we've single-stepped the instruction at
+ * this probepoint, and the instruction is boostable, boost it: add a
+ * jump instruction after the copied instruction, that jumps to the next
+ * instruction after the probepoint.
+ */
+static void __kprobes
+resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+       unsigned long *tos = stack_addr(regs);
+       unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+       unsigned long orig_ip = (unsigned long)p->addr;
+       kprobe_opcode_t *insn = p->ainsn.insn;
+
+       /* Skip prefixes */
+       insn = skip_prefixes(insn);
+
+       regs->flags &= ~X86_EFLAGS_TF;
+       switch (*insn) {
+       case 0x9c:      /* pushfl */
+               /* Replace the pushed TF/IF with the probed context's flags */
+               *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
+               *tos |= kcb->kprobe_old_flags;
+               break;
+       case 0xc2:      /* iret/ret/lret */
+       case 0xc3:
+       case 0xca:
+       case 0xcb:
+       case 0xcf:
+       case 0xea:      /* jmp absolute -- ip is correct */
+               /* ip is already adjusted, no more changes required */
+               p->ainsn.boostable = 1;
+               goto no_change;
+       case 0xe8:      /* call relative - Fix return addr */
+               /* Return addr points after the copy; retarget it */
+               *tos = orig_ip + (*tos - copy_ip);
+               break;
+#ifdef CONFIG_X86_32
+       case 0x9a:      /* call absolute -- same as call absolute, indirect */
+               *tos = orig_ip + (*tos - copy_ip);
+               goto no_change;
+#endif
+       case 0xff:
+               if ((insn[1] & 0x30) == 0x10) {
+                       /*
+                        * call absolute, indirect
+                        * Fix return addr; ip is correct.
+                        * But this is not boostable
+                        */
+                       *tos = orig_ip + (*tos - copy_ip);
+                       goto no_change;
+               } else if (((insn[1] & 0x31) == 0x20) ||
+                          ((insn[1] & 0x31) == 0x21)) {
+                       /*
+                        * jmp near and far, absolute indirect
+                        * ip is correct. And this is boostable
+                        */
+                       p->ainsn.boostable = 1;
+                       goto no_change;
+               }
+       default:
+               break;
+       }
+
+       if (p->ainsn.boostable == 0) {
+               if ((regs->ip > copy_ip) &&
+                   (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
+                       /*
+                        * These instructions can be executed directly if it
+                        * jumps back to correct address.
+                        */
+                       synthesize_reljump((void *)regs->ip,
+                               (void *)orig_ip + (regs->ip - copy_ip));
+                       p->ainsn.boostable = 1;
+               } else {
+                       p->ainsn.boostable = -1;
+               }
+       }
+
+       /* Rebase ip from the copy back onto the original instruction */
+       regs->ip += orig_ip - copy_ip;
+
+no_change:
+       restore_btf();
+}
+
+/*
+ * Interrupts are disabled on entry as trap1 is an interrupt gate and they
+ * remain disabled throughout this function.
+ *
+ * Returns 1 if the debug trap was consumed as the completion of a kprobe
+ * single-step, 0 to let do_debug() continue its normal processing.
+ */
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (!cur)
+               return 0;
+
+       /* Fix up regs/stack after single-stepping the copied instruction */
+       resume_execution(cur, regs, kcb);
+       regs->flags |= kcb->kprobe_saved_flags;
+
+       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               cur->post_handler(cur, regs, 0);
+       }
+
+       /* Restore back the original saved kprobes variables and continue. */
+       if (kcb->kprobe_status == KPROBE_REENTER) {
+               restore_previous_kprobe(kcb);
+               goto out;
+       }
+       reset_current_kprobe();
+out:
+       /* NOTE(review): pairs with a preempt_disable() at probe hit — not visible in this hunk */
+       preempt_enable_no_resched();
+
+       /*
+        * if somebody else is singlestepping across a probe point, flags
+        * will have TF set, in which case, continue the remaining processing
+        * of do_debug, as if this is not a probe hit.
+        */
+       if (regs->flags & X86_EFLAGS_TF)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Handle a page fault that was raised while a kprobe was active.
+ * Returns 1 if the fault was fully handled here, 0 to let
+ * do_page_fault() process it as a normal fault.
+ */
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       switch (kcb->kprobe_status) {
+       case KPROBE_HIT_SS:
+       case KPROBE_REENTER:
+               /*
+                * We are here because the instruction being single
+                * stepped caused a page fault. We reset the current
+                * kprobe and the ip points back to the probe address
+                * and allow the page fault handler to continue as a
+                * normal page fault.
+                */
+               regs->ip = (unsigned long)cur->addr;
+               regs->flags |= kcb->kprobe_old_flags;
+               if (kcb->kprobe_status == KPROBE_REENTER)
+                       restore_previous_kprobe(kcb);
+               else
+                       reset_current_kprobe();
+               preempt_enable_no_resched();
+               break;
+       case KPROBE_HIT_ACTIVE:
+       case KPROBE_HIT_SSDONE:
+               /*
+                * We increment the nmissed count for accounting,
+                * we can also use npre/npostfault count for accounting
+                * these specific fault cases.
+                */
+               kprobes_inc_nmissed_count(cur);
+
+               /*
+                * We come here because instructions in the pre/post
+                * handler caused the page_fault, this could happen
+                * if handler tries to access user space by
+                * copy_from_user(), get_user() etc. Let the
+                * user-specified handler try to fix it first.
+                */
+               if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+                       return 1;
+
+               /*
+                * In case the user-specified fault handler returned
+                * zero, try to fix up.
+                */
+               if (fixup_exception(regs))
+                       return 1;
+
+               /*
+                * fixup routine could not handle it,
+                * Let do_page_fault() fix it.
+                */
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ * Dispatches int3/debug/GPF die notifications to the kprobe handlers;
+ * returns NOTIFY_STOP when the event was consumed by kprobes.
+ */
+int __kprobes
+kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       struct die_args *args = data;
+       int ret = NOTIFY_DONE;
+
+       /* Kprobes is only interested in kernel-mode traps */
+       if (args->regs && user_mode_vm(args->regs))
+               return ret;
+
+       switch (val) {
+       case DIE_INT3:
+               if (kprobe_handler(args->regs))
+                       ret = NOTIFY_STOP;
+               break;
+       case DIE_DEBUG:
+               if (post_kprobe_handler(args->regs)) {
+                       /*
+                        * Reset the BS bit in dr6 (pointed by args->err) to
+                        * denote completion of processing
+                        */
+                       (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
+                       ret = NOTIFY_STOP;
+               }
+               break;
+       case DIE_GPF:
+               /*
+                * To be potentially processing a kprobe fault and to
+                * trust the result from kprobe_running(), we have
+                * be non-preemptible.
+                */
+               if (!preemptible() && kprobe_running() &&
+                   kprobe_fault_handler(args->regs, args->trapnr))
+                       ret = NOTIFY_STOP;
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+/*
+ * jprobe entry: save the register state and enough stack bytes so
+ * longjmp_break_handler() can restore them later, then divert
+ * execution to the user-supplied jprobe handler (jp->entry).
+ * Always returns 1 (the breakpoint is consumed).
+ */
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
+       unsigned long addr;
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       kcb->jprobe_saved_regs = *regs;
+       kcb->jprobe_saved_sp = stack_addr(regs);
+       addr = (unsigned long)(kcb->jprobe_saved_sp);
+
+       /*
+        * As Linus pointed out, gcc assumes that the callee
+        * owns the argument space and could overwrite it, e.g.
+        * tailcall optimization. So, to be absolutely safe
+        * we also save and restore enough stack bytes to cover
+        * the argument area.
+        */
+       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+              MIN_STACK_SIZE(addr));
+       /* Run the handler with interrupts off */
+       regs->flags &= ~X86_EFLAGS_IF;
+       trace_hardirqs_off();
+       regs->ip = (unsigned long)(jp->entry);
+       return 1;
+}
+
+/*
+ * Called from the jprobe handler to give control back: switch to the
+ * saved stack pointer and trap with int3 so longjmp_break_handler()
+ * can restore the original register and stack state.
+ */
+void __kprobes jprobe_return(void)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       asm volatile (
+#ifdef CONFIG_X86_64
+                       "       xchg   %%rbx,%%rsp      \n"
+#else
+                       "       xchgl   %%ebx,%%esp     \n"
+#endif
+                       "       int3                    \n"
+                       "       .globl jprobe_return_end\n"
+                       "       jprobe_return_end:      \n"
+                       "       nop                     \n"::"b"
+                       (kcb->jprobe_saved_sp):"memory");
+}
+
+/*
+ * int3 break handler for jprobe_return(): verify the stack pointer
+ * matches what setjmp_pre_handler() saved, restore the saved registers
+ * and stack bytes, and resume the probed function.  Returns 1 when the
+ * trap came from inside jprobe_return(), 0 otherwise.
+ */
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       u8 *addr = (u8 *) (regs->ip - 1);
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+       if ((addr > (u8 *) jprobe_return) &&
+           (addr < (u8 *) jprobe_return_end)) {
+               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+                       struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
+                       printk(KERN_ERR
+                              "current sp %p does not match saved sp %p\n",
+                              stack_addr(regs), kcb->jprobe_saved_sp);
+                       printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
+                       show_regs(saved_regs);
+                       printk(KERN_ERR "Current registers\n");
+                       show_regs(regs);
+                       BUG();
+               }
+               *regs = kcb->jprobe_saved_regs;
+               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
+                      kcb->jprobes_stack,
+                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+               preempt_enable_no_resched();
+               return 1;
+       }
+       return 0;
+}
+
+/* Arch-specific kprobes initialization: just set up optprobe buffers. */
+int __init arch_init_kprobes(void)
+{
+       return arch_init_optprobes();
+}
+
+/* Always 0: no kprobe is treated as the arch trampoline probe here. */
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+       return 0;
+}
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
new file mode 100644 (file)
index 0000000..23ef5c5
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+#include "common.h"
+
+/*
+ * Emulate completion of single-stepping for an ftrace-based kprobe:
+ * advance regs->ip past the mcount call site, run the post handler if
+ * any, and clear current_kprobe.  Always returns 1 (step was skipped).
+ */
+static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                            struct kprobe_ctlblk *kcb)
+{
+       /*
+        * Emulate singlestep (and also recover regs->ip)
+        * as if there is a 5byte nop
+        */
+       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+       if (unlikely(p->post_handler)) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               p->post_handler(p, regs, 0);
+       }
+       __this_cpu_write(current_kprobe, NULL);
+       return 1;
+}
+
+/* Skip single-stepping only for ftrace-based kprobes; returns 0 otherwise. */
+int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                             struct kprobe_ctlblk *kcb)
+{
+       if (kprobe_ftrace(p))
+               return __skip_singlestep(p, regs, kcb);
+       else
+               return 0;
+}
+
+/*
+ * Ftrace callback handler for kprobes: entered through the ftrace
+ * call site rather than an int3 breakpoint, so the breakpoint
+ * environment (irqs off, adjusted regs->ip) is emulated here.
+ */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+                                    struct ftrace_ops *ops, struct pt_regs *regs)
+{
+       struct kprobe *p;
+       struct kprobe_ctlblk *kcb;
+       unsigned long flags;
+
+       /* Disable irq for emulating a breakpoint and avoiding preempt */
+       local_irq_save(flags);
+
+       p = get_kprobe((kprobe_opcode_t *)ip);
+       if (unlikely(!p) || kprobe_disabled(p))
+               goto end;
+
+       kcb = get_kprobe_ctlblk();
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(p);
+       } else {
+               /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+               regs->ip = ip + sizeof(kprobe_opcode_t);
+
+               __this_cpu_write(current_kprobe, p);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               if (!p->pre_handler || !p->pre_handler(p, regs))
+                       __skip_singlestep(p, regs, kcb);
+               /*
+                * If pre_handler returns !0, it sets regs->ip and
+                * resets current kprobe.
+                */
+       }
+end:
+       local_irq_restore(flags);
+}
+
+/* Ftrace-based kprobes need no out-of-line insn slot; never boosted. */
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+       p->ainsn.insn = NULL;
+       p->ainsn.boostable = -1;
+       return 0;
+}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
new file mode 100644 (file)
index 0000000..76dc6f0
--- /dev/null
@@ -0,0 +1,512 @@
+/*
+ *  Kernel Probes Jump Optimization (Optprobes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/insn.h>
+#include <asm/debugreg.h>
+
+#include "common.h"
+
+/*
+ * Recover the original instruction bytes at @addr when they have been
+ * replaced by a jump-optimized kprobe.  Returns the address of @buf
+ * holding the recovered bytes, or @addr itself if nothing to recover.
+ */
+unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+{
+       struct optimized_kprobe *op;
+       struct kprobe *kp;
+       long offs;
+       int i;
+
+       for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
+               kp = get_kprobe((void *)addr - i);
+               /* This function only handles jump-optimized kprobe */
+               if (kp && kprobe_optimized(kp)) {
+                       op = container_of(kp, struct optimized_kprobe, kp);
+                       /* If op->list is not empty, op is under optimizing */
+                       if (list_empty(&op->list))
+                               goto found;
+               }
+       }
+
+       return addr;
+found:
+       /*
+        * If the kprobe is optimized, the original bytes may have been
+        * overwritten by the relative-jump destination address.  In this
+        * case, the original bytes must be recovered from the
+        * op->optinsn.copied_insn buffer.
+        */
+       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (addr == (unsigned long)kp->addr) {
+               buf[0] = kp->opcode;
+               memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+       } else {
+               /* addr falls inside the replaced region; copy the tail */
+               offs = addr - (unsigned long)kp->addr - 1;
+               memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
+       }
+
+       return (unsigned long)buf;
+}
+
+/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
+static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+{
+#ifdef CONFIG_X86_64
+       /* movabs $val, %rdi (REX.W + 0xbf) */
+       *addr++ = 0x48;
+       *addr++ = 0xbf;
+#else
+       /* movl $val, %eax */
+       *addr++ = 0xb8;
+#endif
+       *(unsigned long *)addr = val;
+}
+
+/*
+ * Holds the assembly template for the optprobe detour buffer.  The code
+ * between optprobe_template_entry and optprobe_template_end is copied
+ * into each detour buffer (never executed from here); the NOP
+ * placeholders are later patched with the probe argument and the call
+ * to optimized_callback() by arch_prepare_optimized_kprobe().
+ */
+static void __used __kprobes kprobes_optinsn_template_holder(void)
+{
+       asm volatile (
+                       ".global optprobe_template_entry\n"
+                       "optprobe_template_entry:\n"
+#ifdef CONFIG_X86_64
+                       /* We don't bother saving the ss register */
+                       "       pushq %rsp\n"
+                       "       pushfq\n"
+                       SAVE_REGS_STRING
+                       "       movq %rsp, %rsi\n"
+                       ".global optprobe_template_val\n"
+                       "optprobe_template_val:\n"
+                       ASM_NOP5
+                       ASM_NOP5
+                       ".global optprobe_template_call\n"
+                       "optprobe_template_call:\n"
+                       ASM_NOP5
+                       /* Move flags to rsp */
+                       "       movq 144(%rsp), %rdx\n"
+                       "       movq %rdx, 152(%rsp)\n"
+                       RESTORE_REGS_STRING
+                       /* Skip flags entry */
+                       "       addq $8, %rsp\n"
+                       "       popfq\n"
+#else /* CONFIG_X86_32 */
+                       "       pushf\n"
+                       SAVE_REGS_STRING
+                       "       movl %esp, %edx\n"
+                       ".global optprobe_template_val\n"
+                       "optprobe_template_val:\n"
+                       ASM_NOP5
+                       ".global optprobe_template_call\n"
+                       "optprobe_template_call:\n"
+                       ASM_NOP5
+                       RESTORE_REGS_STRING
+                       "       addl $4, %esp\n"        /* skip cs */
+                       "       popf\n"
+#endif
+                       ".global optprobe_template_end\n"
+                       "optprobe_template_end:\n");
+}
+
+#define TMPL_MOVE_IDX \
+       ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+       ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+#define TMPL_END_IDX \
+       ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+
+#define INT3_SIZE sizeof(kprobe_opcode_t)
+
+/* Optimized kprobe call back function: called from optinsn */
+static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long flags;
+
+       /* This is possible if op is under delayed unoptimizing */
+       if (kprobe_disabled(&op->kp))
+               return;
+
+       local_irq_save(flags);
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(&op->kp);
+       } else {
+               /* Save skipped registers */
+#ifdef CONFIG_X86_64
+               regs->cs = __KERNEL_CS;
+#else
+               regs->cs = __KERNEL_CS | get_kernel_rpl();
+               regs->gs = 0;
+#endif
+               /* Make regs look as if an int3 had just fired at the probe */
+               regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+               regs->orig_ax = ~0UL;
+
+               __this_cpu_write(current_kprobe, &op->kp);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               opt_pre_handler(&op->kp, regs);
+               __this_cpu_write(current_kprobe, NULL);
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Copy at least RELATIVEJUMP_SIZE bytes of instructions from @src to
+ * @dest, one whole instruction at a time.  Returns the copied length,
+ * -EINVAL if an instruction cannot be copied/boosted, or -EBUSY if the
+ * range is reserved by another code-patching subsystem.
+ */
+static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+{
+       int len = 0, ret;
+
+       while (len < RELATIVEJUMP_SIZE) {
+               ret = __copy_instruction(dest + len, src + len);
+               if (!ret || !can_boost(dest + len))
+                       return -EINVAL;
+               len += ret;
+       }
+       /* Check whether the address range is reserved */
+       if (ftrace_text_reserved(src, src + len - 1) ||
+           alternatives_text_reserved(src, src + len - 1) ||
+           jump_label_text_reserved(src, src + len - 1))
+               return -EBUSY;
+
+       return len;
+}
+
+/* Check whether insn is indirect jump */
+static int __kprobes insn_is_indirect_jump(struct insn *insn)
+{
+       /* 0xff /4 and /5 are near/far indirect jmp; (reg & 6) == 4 matches both */
+       return ((insn->opcode.bytes[0] == 0xff &&
+               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+               insn->opcode.bytes[0] == 0xea); /* Segment based jump */
+}
+
+/* Check whether insn jumps into specified address range */
+static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+{
+       unsigned long target = 0;
+
+       switch (insn->opcode.bytes[0]) {
+       case 0xe0:      /* loopne */
+       case 0xe1:      /* loope */
+       case 0xe2:      /* loop */
+       case 0xe3:      /* jcxz */
+       case 0xe9:      /* near relative jump */
+       case 0xeb:      /* short relative jump */
+               break;
+       case 0x0f:
+               if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
+                       break;
+               return 0;
+       default:
+               if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
+                       break;
+               return 0;
+       }
+       /* Relative branch: target = address of next instruction + imm */
+       target = (unsigned long)insn->next_byte + insn->immediate.value;
+
+       return (start <= target && target <= start + len);
+}
+
+/*
+ * Decode whole function to ensure any instructions don't jump into target.
+ * Returns 1 if the probe point at @paddr can be jump-optimized, 0 if not.
+ */
+static int __kprobes can_optimize(unsigned long paddr)
+{
+       unsigned long addr, size = 0, offset = 0;
+       struct insn insn;
+       kprobe_opcode_t buf[MAX_INSN_SIZE];
+
+       /* Lookup symbol including addr */
+       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
+               return 0;
+
+       /*
+        * Do not optimize in the entry code due to the unstable
+        * stack handling.
+        */
+       if ((paddr >= (unsigned long)__entry_text_start) &&
+           (paddr <  (unsigned long)__entry_text_end))
+               return 0;
+
+       /* Check there is enough space for a relative jump. */
+       if (size - offset < RELATIVEJUMP_SIZE)
+               return 0;
+
+       /* Decode instructions */
+       addr = paddr - offset;
+       while (addr < paddr - offset + size) { /* Decode until function end */
+               if (search_exception_tables(addr))
+                       /*
+                        * Since some fixup code will jumps into this function,
+                        * we can't optimize kprobe in this function.
+                        */
+                       return 0;
+               /* buf holds the recovered bytes if addr is under a kprobe */
+               kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
+               insn_get_length(&insn);
+               /* Another subsystem puts a breakpoint */
+               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+                       return 0;
+               /* Recover address */
+               insn.kaddr = (void *)addr;
+               insn.next_byte = (void *)(addr + insn.length);
+               /* Check any instructions don't jump into target */
+               if (insn_is_indirect_jump(&insn) ||
+                   insn_jump_into_range(&insn, paddr + INT3_SIZE,
+                                        RELATIVE_ADDR_SIZE))
+                       return 0;
+               addr += insn.length;
+       }
+
+       return 1;
+}
+
+/*
+ * Check optimized_kprobe can actually be optimized.
+ * Returns -EEXIST if another enabled kprobe lies inside the bytes
+ * that would be overwritten by the relative jump, 0 otherwise.
+ */
+int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+       int i;
+       struct kprobe *p;
+
+       for (i = 1; i < op->optinsn.size; i++) {
+               p = get_kprobe(op->kp.addr + i);
+               if (p && !kprobe_disabled(p))
+                       return -EEXIST;
+       }
+
+       return 0;
+}
+
+/* Check the addr is within the optimized instructions: [kp.addr, kp.addr + size). */
+int __kprobes
+arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+{
+       return ((unsigned long)op->kp.addr <= addr &&
+               (unsigned long)op->kp.addr + op->optinsn.size > addr);
+}
+
+/* Free optimized instruction slot; @dirty is forwarded to free_optinsn_slot(). */
+static __kprobes
+void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+       if (op->optinsn.insn) {
+               free_optinsn_slot(op->optinsn.insn, dirty);
+               op->optinsn.insn = NULL;
+               op->optinsn.size = 0;
+       }
+}
+
+/* Public wrapper: release the detour buffer for @op (dirty slot). */
+void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+       __arch_remove_optimized_kprobe(op, 1);
+}
+
+/*
+ * Copy replacing target instructions
+ * Target instructions MUST be relocatable (checked inside)
+ * This is called when new aggr(opt)probe is allocated or reused.
+ *
+ * Returns 0 on success or a negative errno:
+ *   -EILSEQ  the instructions at the probe point cannot be optimized
+ *   -ENOMEM  no detour buffer slot is available
+ *   -ERANGE  detour buffer is out of rel32 jump range from the probe
+ */
+int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+{
+       u8 *buf;
+       int ret;
+       long rel;
+
+       if (!can_optimize((unsigned long)op->kp.addr))
+               return -EILSEQ;
+
+       op->optinsn.insn = get_optinsn_slot();
+       if (!op->optinsn.insn)
+               return -ENOMEM;
+
+       /*
+        * Verify if the address gap is in 2GB range, because this uses
+        * a relative jump.
+        *
+        * A rel32 displacement is relative to the *end* of the jump
+        * instruction, so subtract (kp.addr + RELATIVEJUMP_SIZE) as one
+        * parenthesized quantity — matching setup_optimize_kprobe().
+        */
+       rel = (long)op->optinsn.insn -
+             ((long)op->kp.addr + RELATIVEJUMP_SIZE);
+       if (abs(rel) > 0x7fffffff)
+               return -ERANGE;
+
+       buf = (u8 *)op->optinsn.insn;
+
+       /* Copy instructions into the out-of-line buffer */
+       ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
+       if (ret < 0) {
+               __arch_remove_optimized_kprobe(op, 0);
+               return ret;
+       }
+       op->optinsn.size = ret;
+
+       /* Copy arch-dep-instance from template */
+       memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+
+       /* Set probe information */
+       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+       /* Set probe function call */
+       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+
+       /* Set returning jmp instruction at the tail of out-of-line buffer */
+       synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+                          (u8 *)op->kp.addr + op->optinsn.size);
+
+       flush_icache_range((unsigned long) buf,
+                          (unsigned long) buf + TMPL_END_IDX +
+                          op->optinsn.size + RELATIVEJUMP_SIZE);
+       return 0;
+}
+
+#define MAX_OPTIMIZE_PROBES 256
+static struct text_poke_param *jump_poke_params;
+static struct jump_poke_buffer {
+       u8 buf[RELATIVEJUMP_SIZE];
+} *jump_poke_bufs;
+
+/* Prepare one text_poke request replacing the probe's int3 with a rel32 jmp. */
+static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+                                           u8 *insn_buf,
+                                           struct optimized_kprobe *op)
+{
+       /* rel32 displacement of a jmp at kp.addr targeting the detour buffer */
+       s32 rel = (s32)((long)op->optinsn.insn -
+                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+       /* Backup instructions which will be replaced by jump address */
+       memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+              RELATIVE_ADDR_SIZE);
+
+       insn_buf[0] = RELATIVEJUMP_OPCODE;
+       *(s32 *)(&insn_buf[1]) = rel;
+
+       tprm->addr = op->kp.addr;
+       tprm->opcode = insn_buf;
+       tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Replace breakpoints (int3) with relative jumps.
+ * Caller must call with locking kprobe_mutex and text_mutex.
+ * At most MAX_OPTIMIZE_PROBES entries are handled per call; any
+ * remaining entries stay on @oplist.
+ */
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+       struct optimized_kprobe *op, *tmp;
+       int c = 0;
+
+       list_for_each_entry_safe(op, tmp, oplist, list) {
+               WARN_ON(kprobe_disabled(&op->kp));
+               /* Setup param */
+               setup_optimize_kprobe(&jump_poke_params[c],
+                                     jump_poke_bufs[c].buf, op);
+               list_del_init(&op->list);
+               if (++c >= MAX_OPTIMIZE_PROBES)
+                       break;
+       }
+
+       /*
+        * text_poke_smp doesn't support NMI/MCE code modifying.
+        * However, since kprobes itself also doesn't support NMI/MCE
+        * code probing, it's not a problem.
+        */
+       text_poke_smp_batch(jump_poke_params, c);
+}
+
+/* Prepare one text_poke request restoring int3 + the original tail bytes. */
+static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
+                                             u8 *insn_buf,
+                                             struct optimized_kprobe *op)
+{
+       /* Set int3 to first byte for kprobes */
+       insn_buf[0] = BREAKPOINT_INSTRUCTION;
+       memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+       tprm->addr = op->kp.addr;
+       tprm->opcode = insn_buf;
+       tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must call with locking kprobe_mutex.
+ * Processed probes are moved to @done_list; at most MAX_OPTIMIZE_PROBES
+ * entries are handled per call.
+ */
+void __kprobes arch_unoptimize_kprobes(struct list_head *oplist,
+                                      struct list_head *done_list)
+{
+       struct optimized_kprobe *op, *tmp;
+       int c = 0;
+
+       list_for_each_entry_safe(op, tmp, oplist, list) {
+               /* Setup param */
+               setup_unoptimize_kprobe(&jump_poke_params[c],
+                                       jump_poke_bufs[c].buf, op);
+               list_move(&op->list, done_list);
+               if (++c >= MAX_OPTIMIZE_PROBES)
+                       break;
+       }
+
+       /*
+        * text_poke_smp doesn't support NMI/MCE code modifying.
+        * However, since kprobes itself also doesn't support NMI/MCE
+        * code probing, it's not a problem.
+        */
+       text_poke_smp_batch(jump_poke_params, c);
+}
+
+/* Replace a relative jump with a breakpoint (int3) — single-probe variant
+ * of setup_unoptimize_kprobe() applied immediately via text_poke_smp(). */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+       u8 buf[RELATIVEJUMP_SIZE];
+
+       /* Set int3 to first byte for kprobes */
+       buf[0] = BREAKPOINT_INSTRUCTION;
+       memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+       text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
+}
+
+/*
+ * If @p is a jump-optimized kprobe, resume execution through its detour
+ * buffer instead of single-stepping.  Returns 1 when execution was
+ * diverted, 0 to fall back to the regular single-step path.
+ */
+int  __kprobes
+setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+{
+       struct optimized_kprobe *op;
+
+       if (p->flags & KPROBE_FLAG_OPTIMIZED) {
+               /* This kprobe is really able to run optimized path. */
+               op = container_of(p, struct optimized_kprobe, kp);
+               /* Detour through copied instructions */
+               regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+               if (!reenter)
+                       reset_current_kprobe();
+               preempt_enable_no_resched();
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Allocate the shared buffers used to batch optprobe text_poke updates.
+ * Returns 0 on success, -ENOMEM on allocation failure (with partial
+ * allocations cleaned up).
+ */
+int __kprobes arch_init_optprobes(void)
+{
+       /* Allocate code buffer and parameter array */
+       jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
+                                MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+       if (!jump_poke_bufs)
+               return -ENOMEM;
+
+       jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
+                                  MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+       if (!jump_poke_params) {
+               kfree(jump_poke_bufs);
+               jump_poke_bufs = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
index c71025b674623133219377a70b1e27f380a95ffa..0ba4cfb4f412dcc960e26107083535bf1ba10181 100644 (file)
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (auprobe->insn[i] == 0x66)
                        continue;
 
-               if (auprobe->insn[i] == 0x90)
+               if (auprobe->insn[i] == 0x90) {
+                       regs->ip += i + 1;
                        return true;
+               }
 
                break;
        }
index e6defd86b42454e98d2828abf2d99240d9e68b18..1e5d8a40101e274f5d6ce2bfe1a24a486a671af7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/time.h>
 #include <linux/cper.h>
 #include <linux/acpi.h>
+#include <linux/pci.h>
 #include <linux/aer.h>
 
 /*
@@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = {
 static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                            const struct acpi_hest_generic_data *gdata)
 {
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+       struct pci_dev *dev;
+#endif
+
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
                       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
        "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
        pfx, pcie->bridge.secondary_status, pcie->bridge.control);
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
-               struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
-               cper_print_aer(pfx, gdata->error_severity, aer_regs);
+       dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
+                       pcie->device_id.bus, pcie->device_id.function);
+       if (!dev) {
+               pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
+                       pcie->device_id.segment, pcie->device_id.bus,
+                       pcie->device_id.slot, pcie->device_id.function);
+               return;
        }
+       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
+               cper_print_aer(pfx, dev, gdata->error_severity,
+                               (struct aer_capability_regs *) pcie->aer_info);
+       pci_dev_put(dev);
 #endif
 }
 
index 3ea51736f18db45be7c40280626eb0d803c157b1..5ab14251839d0f06c395ac0dfc064c166cf3b75a 100644 (file)
@@ -23,6 +23,9 @@
 
 #include "aerdrv.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ras.h>
+
 #define AER_AGENT_RECEIVER             0
 #define AER_AGENT_REQUESTER            1
 #define AER_AGENT_COMPLETER            2
@@ -121,12 +124,11 @@ static const char *aer_agent_string[] = {
        "Transmitter ID"
 };
 
-static void __aer_print_error(const char *prefix,
+static void __aer_print_error(struct pci_dev *dev,
                              struct aer_err_info *info)
 {
        int i, status;
        const char *errmsg = NULL;
-
        status = (info->status & ~info->mask);
 
        for (i = 0; i < 32; i++) {
@@ -141,26 +143,22 @@ static void __aer_print_error(const char *prefix,
                                aer_uncorrectable_error_string[i] : NULL;
 
                if (errmsg)
-                       printk("%s""   [%2d] %-22s%s\n", prefix, i, errmsg,
+                       dev_err(&dev->dev, "   [%2d] %-22s%s\n", i, errmsg,
                                info->first_error == i ? " (First)" : "");
                else
-                       printk("%s""   [%2d] Unknown Error Bit%s\n", prefix, i,
-                               info->first_error == i ? " (First)" : "");
+                       dev_err(&dev->dev, "   [%2d] Unknown Error Bit%s\n",
+                               i, info->first_error == i ? " (First)" : "");
        }
 }
 
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 {
        int id = ((dev->bus->number << 8) | dev->devfn);
-       char prefix[44];
-
-       snprintf(prefix, sizeof(prefix), "%s%s %s: ",
-                (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
-                dev_driver_string(&dev->dev), dev_name(&dev->dev));
 
        if (info->status == 0) {
-               printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, "
-                       "id=%04x(Unregistered Agent ID)\n", prefix,
+               dev_err(&dev->dev,
+                       "PCIe Bus Error: severity=%s, type=Unaccessible, "
+                       "id=%04x(Unregistered Agent ID)\n",
                        aer_error_severity_string[info->severity], id);
        } else {
                int layer, agent;
@@ -168,22 +166,24 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
                layer = AER_GET_LAYER_ERROR(info->severity, info->status);
                agent = AER_GET_AGENT(info->severity, info->status);
 
-               printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
-                       prefix, aer_error_severity_string[info->severity],
+               dev_err(&dev->dev,
+                       "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+                       aer_error_severity_string[info->severity],
                        aer_error_layer[layer], id, aer_agent_string[agent]);
 
-               printk("%s""  device [%04x:%04x] error status/mask=%08x/%08x\n",
-                       prefix, dev->vendor, dev->device,
+               dev_err(&dev->dev,
+                       "  device [%04x:%04x] error status/mask=%08x/%08x\n",
+                       dev->vendor, dev->device,
                        info->status, info->mask);
 
-               __aer_print_error(prefix, info);
+               __aer_print_error(dev, info);
 
                if (info->tlp_header_valid) {
                        unsigned char *tlp = (unsigned char *) &info->tlp;
-                       printk("%s""  TLP Header:"
+                       dev_err(&dev->dev, "  TLP Header:"
                                " %02x%02x%02x%02x %02x%02x%02x%02x"
                                " %02x%02x%02x%02x %02x%02x%02x%02x\n",
-                               prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+                               *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
                                *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
                                *(tlp + 11), *(tlp + 10), *(tlp + 9),
                                *(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -192,8 +192,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
        }
 
        if (info->id && info->error_dev_num > 1 && info->id == id)
-               printk("%s""  Error of this Agent(%04x) is reported first\n",
-                       prefix, id);
+               dev_err(&dev->dev,
+                          "  Error of this Agent(%04x) is reported first\n",
+                       id);
+       trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
+                       info->severity);
 }
 
 void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -217,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
 }
 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
 
-void cper_print_aer(const char *prefix, int cper_severity,
+void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
                    struct aer_capability_regs *aer)
 {
        int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -239,25 +242,27 @@ void cper_print_aer(const char *prefix, int cper_severity,
        }
        layer = AER_GET_LAYER_ERROR(aer_severity, status);
        agent = AER_GET_AGENT(aer_severity, status);
-       printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n",
-              prefix, status, mask);
+       dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+              status, mask);
        cper_print_bits(prefix, status, status_strs, status_strs_size);
-       printk("%s""aer_layer=%s, aer_agent=%s\n", prefix,
+       dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
               aer_error_layer[layer], aer_agent_string[agent]);
        if (aer_severity != AER_CORRECTABLE)
-               printk("%s""aer_uncor_severity: 0x%08x\n",
-                      prefix, aer->uncor_severity);
+               dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
+                      aer->uncor_severity);
        if (tlp_header_valid) {
                const unsigned char *tlp;
                tlp = (const unsigned char *)&aer->header_log;
-               printk("%s""aer_tlp_header:"
+               dev_err(&dev->dev, "aer_tlp_header:"
                        " %02x%02x%02x%02x %02x%02x%02x%02x"
                        " %02x%02x%02x%02x %02x%02x%02x%02x\n",
-                       prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+                       *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
                        *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
                        *(tlp + 11), *(tlp + 10), *(tlp + 9),
                        *(tlp + 8), *(tlp + 15), *(tlp + 14),
                        *(tlp + 13), *(tlp + 12));
        }
+       trace_aer_event(dev_name(&dev->dev), (status & ~mask),
+                       aer_severity);
 }
 #endif
index 544abdb2238ccdffda329816578b41bb954fc617..ec10e1b24c1cce50d50581d58c5000cf7b9e5565 100644 (file)
@@ -49,8 +49,8 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 }
 #endif
 
-extern void cper_print_aer(const char *prefix, int cper_severity,
-                          struct aer_capability_regs *aer);
+extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
+                          int cper_severity, struct aer_capability_regs *aer);
 extern int cper_severity_to_aer(int cper_severity);
 extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                              int severity);
index 92691d85c32061f4d0acbe45ecccfcd78267240d..e5ca8ef50e9bf674e46fa58d7fff5f0424ab5bad 100644 (file)
@@ -74,7 +74,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *            and passed to the callback. If this flag is set, but the
  *            architecture does not support passing regs
- *            (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
+ *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
  *            ftrace_ops will fail to register, unless the next flag
  *            is set.
  * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
@@ -418,7 +418,7 @@ void ftrace_modify_all_code(int command);
 #endif
 
 #ifndef FTRACE_REGS_ADDR
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
 #else
 # define FTRACE_REGS_ADDR FTRACE_ADDR
@@ -480,7 +480,7 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 /**
  * ftrace_modify_call - convert from one addr to another (no nop)
  * @rec: the mcount call site record
index a3d489531d83a7c396aaa6a24d09961eeae47929..13a54d0bdfa8afbe2cc29ec6bf8ce016c1b2306c 100644 (file)
@@ -49,7 +49,6 @@ struct trace_entry {
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
-       int                     padding;
 };
 
 #define FTRACE_MAX_EVENT                                               \
@@ -84,6 +83,9 @@ struct trace_iterator {
        long                    idx;
 
        cpumask_var_t           started;
+
+       /* it's true when current open file is snapshot */
+       bool                    snapshot;
 };
 
 enum trace_iter_flags {
@@ -272,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
 extern int trace_add_event_call(struct ftrace_event_call *call);
 extern void trace_remove_event_call(struct ftrace_event_call *call);
 
-#define is_signed_type(type)   (((type)(-1)) < 0)
+#define is_signed_type(type)   (((type)(-1)) < (type)0)
 
 int trace_set_clr_event(const char *system, const char *event, int set);
 
index 624ef3f45c8efe51667a6d2d9f558e3d6351736f..57bfdce8fb9076be0397e600a88bfb9963def7e1 100644 (file)
@@ -180,10 +180,10 @@ extern void irq_exit(void);
 
 #define nmi_enter()                                            \
        do {                                                    \
+               lockdep_off();                                  \
                ftrace_nmi_enter();                             \
                BUG_ON(in_nmi());                               \
                add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
-               lockdep_off();                                  \
                rcu_nmi_enter();                                \
                trace_hardirq_enter();                          \
        } while (0)
@@ -192,10 +192,10 @@ extern void irq_exit(void);
        do {                                                    \
                trace_hardirq_exit();                           \
                rcu_nmi_exit();                                 \
-               lockdep_on();                                   \
                BUG_ON(!in_nmi());                              \
                sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
                ftrace_nmi_exit();                              \
+               lockdep_on();                                   \
        } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
index 23755ba42abc456a06ffd663d0b3817570c1e7cb..4b6ef4d33cc26b65e242fb8f086cd3754672d962 100644 (file)
 #define KPROBE_REENTER         0x00000004
 #define KPROBE_HIT_SSDONE      0x00000008
 
-/*
- * If function tracer is enabled and the arch supports full
- * passing of pt_regs to function tracing, then kprobes can
- * optimize on top of function tracing.
- */
-#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
-       && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
-# define KPROBES_CAN_USE_FTRACE
-#endif
-
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes      __attribute__((__section__(".kprobes.text")))
 
@@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *ops, struct pt_regs *regs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
index 6bfb2faa0b1937555f5ca8fa937c7007bf5b3bc4..e47ee462c2f2e69a8cbef4ef3a3d7bfc873cb51f 100644 (file)
@@ -135,16 +135,21 @@ struct hw_perf_event {
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
+               struct { /* tracepoint */
+                       struct task_struct      *tp_target;
+                       /* for tp_event->class */
+                       struct list_head        tp_list;
+               };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
-                       struct arch_hw_breakpoint       info;
-                       struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initalization.
                         */
                        struct task_struct              *bp_target;
+                       struct arch_hw_breakpoint       info;
+                       struct list_head                bp_list;
                };
 #endif
        };
@@ -817,6 +822,17 @@ do {                                                                       \
 } while (0)
 
 
+struct perf_pmu_events_attr {
+       struct device_attribute attr;
+       u64 id;
+};
+
+#define PMU_EVENT_ATTR(_name, _var, _id, _show)                                \
+static struct perf_pmu_events_attr _var = {                            \
+       .attr = __ATTR(_name, 0444, _show, NULL),                       \
+       .id   =  _id,                                                   \
+};
+
 #define PMU_FORMAT_ATTR(_name, _format)                                        \
 static ssize_t                                                         \
 _name##_show(struct device *dev,                                       \
index a0fc32279fc09d53a94b73b4c9dc89645d637b54..21123902366d8eb2b9b9be26b78e98e8c9badf13 100644 (file)
@@ -82,9 +82,6 @@ int task_handoff_unregister(struct notifier_block * n);
 int profile_event_register(enum profile_type, struct notifier_block * n);
 int profile_event_unregister(enum profile_type, struct notifier_block * n);
 
-int register_timer_hook(int (*hook)(struct pt_regs *));
-void unregister_timer_hook(int (*hook)(struct pt_regs *));
-
 struct pt_regs;
 
 #else
@@ -135,16 +132,6 @@ static inline int profile_event_unregister(enum profile_type t, struct notifier_
 #define profile_handoff_task(a) (0)
 #define profile_munmap(a) do { } while (0)
 
-static inline int register_timer_hook(int (*hook)(struct pt_regs *))
-{
-       return -ENOSYS;
-}
-
-static inline void unregister_timer_hook(int (*hook)(struct pt_regs *))
-{
-       return;
-}
-
 #endif /* CONFIG_PROFILING */
 
 #endif /* _LINUX_PROFILE_H */
index 519777e3fa019f87883224154e875003ca56bbdd..1342e69542f345dada550d298f7bacf6f6fe75e9 100644 (file)
@@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
index 4f628a6fc5b415cb763e9fd3b9260526649a7436..02b83db8e2c5d2f330407a7f76c42e7ff0ac11c2 100644 (file)
@@ -35,13 +35,20 @@ struct inode;
 # include <asm/uprobes.h>
 #endif
 
+#define UPROBE_HANDLER_REMOVE          1
+#define UPROBE_HANDLER_MASK            1
+
+enum uprobe_filter_ctx {
+       UPROBE_FILTER_REGISTER,
+       UPROBE_FILTER_UNREGISTER,
+       UPROBE_FILTER_MMAP,
+};
+
 struct uprobe_consumer {
        int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
-       /*
-        * filter is optional; If a filter exists, handler is run
-        * if and only if filter returns true.
-        */
-       bool (*filter)(struct uprobe_consumer *self, struct task_struct *task);
+       bool (*filter)(struct uprobe_consumer *self,
+                               enum uprobe_filter_ctx ctx,
+                               struct mm_struct *mm);
 
        struct uprobe_consumer *next;
 };
@@ -94,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
 extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -117,6 +125,11 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
        return -ENOSYS;
 }
+static inline int
+uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+{
+       return -ENOSYS;
+}
 static inline void
 uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
new file mode 100644 (file)
index 0000000..88b8783
--- /dev/null
@@ -0,0 +1,77 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ras
+
+#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AER_H
+
+#include <linux/tracepoint.h>
+#include <linux/edac.h>
+
+
+/*
+ * PCIe AER Trace event
+ *
+ * These events are generated when hardware detects a corrected or
+ * uncorrected event on a PCIe device. The event report has
+ * the following structure:
+ *
+ * char * dev_name -   The name of the slot where the device resides
+ *                     ([domain:]bus:device.function).
+ * u32 status -                Either the correctable or uncorrectable register
+ *                     indicating what error or errors have been seen
+ * u8 severity -       error severity 0:NONFATAL 1:FATAL 2:CORRECTED
+ */
+
+#define aer_correctable_errors         \
+       {BIT(0),        "Receiver Error"},              \
+       {BIT(6),        "Bad TLP"},                     \
+       {BIT(7),        "Bad DLLP"},                    \
+       {BIT(8),        "RELAY_NUM Rollover"},          \
+       {BIT(12),       "Replay Timer Timeout"},        \
+       {BIT(13),       "Advisory Non-Fatal"}
+
+#define aer_uncorrectable_errors               \
+       {BIT(4),        "Data Link Protocol"},          \
+       {BIT(12),       "Poisoned TLP"},                \
+       {BIT(13),       "Flow Control Protocol"},       \
+       {BIT(14),       "Completion Timeout"},          \
+       {BIT(15),       "Completer Abort"},             \
+       {BIT(16),       "Unexpected Completion"},       \
+       {BIT(17),       "Receiver Overflow"},           \
+       {BIT(18),       "Malformed TLP"},               \
+       {BIT(19),       "ECRC"},                        \
+       {BIT(20),       "Unsupported Request"}
+
+TRACE_EVENT(aer_event,
+       TP_PROTO(const char *dev_name,
+                const u32 status,
+                const u8 severity),
+
+       TP_ARGS(dev_name, status, severity),
+
+       TP_STRUCT__entry(
+               __string(       dev_name,       dev_name        )
+               __field(        u32,            status          )
+               __field(        u8,             severity        )
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __entry->status         = status;
+               __entry->severity       = severity;
+       ),
+
+       TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
+               __get_str(dev_name),
+               __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
+                       __entry->severity == HW_EVENT_ERR_FATAL ?
+                       "Fatal" : "Uncorrected",
+               __entry->severity == HW_EVENT_ERR_CORRECTED ?
+               __print_flags(__entry->status, "|", aer_correctable_errors) :
+               __print_flags(__entry->status, "|", aer_uncorrectable_errors))
+);
+
+#endif /* _TRACE_AER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 4f63c05d27c91d04569971ea4a2c4849203c36a9..9fa9c622a7f45ab6032aefd74001930b4d7ddb07 100644 (file)
@@ -579,7 +579,8 @@ enum perf_event_type {
         *      { u32                   size;
         *        char                  data[size];}&& PERF_SAMPLE_RAW
         *
-        *      { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+        *      { u64                   nr;
+        *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
index 7b6646a8c067b4ec9143585746fd2b2bccb73f0c..5c75791d7269e87c19a86fe711364f2c3c2793f2 100644 (file)
@@ -6171,11 +6171,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (task) {
                event->attach_state = PERF_ATTACH_TASK;
+
+               if (attr->type == PERF_TYPE_TRACEPOINT)
+                       event->hw.tp_target = task;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                /*
                 * hw_breakpoint is a bit difficult here..
                 */
-               if (attr->type == PERF_TYPE_BREAKPOINT)
+               else if (attr->type == PERF_TYPE_BREAKPOINT)
                        event->hw.bp_target = task;
 #endif
        }
index fe8a916507ed278cf545196561878a842547a865..a64f8aeb5c1f5adae53fde406b3d904e1485aa80 100644 (file)
@@ -676,7 +676,7 @@ int __init init_hw_breakpoint(void)
  err_alloc:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
-                       kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+                       kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }
index dea7acfbb0710889bf19e5fcb6584da4c5302f5d..a567c8c7ef31fa8c7d9416c8a255717f6b978932 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>     /* read_mapping_page */
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
 #include <linux/rmap.h>                /* anon_vma_prepare */
 #include <linux/mmu_notifier.h>        /* set_pte_at_notify */
 #include <linux/swap.h>                /* try_to_free_swap */
 #define MAX_UPROBE_XOL_SLOTS           UINSNS_PER_PAGE
 
 static struct rb_root uprobes_tree = RB_ROOT;
-
-static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
-
-#define UPROBES_HASH_SZ        13
-
 /*
- * We need separate register/unregister and mmap/munmap lock hashes because
- * of mmap_sem nesting.
- *
- * uprobe_register() needs to install probes on (potentially) all processes
- * and thus needs to acquire multiple mmap_sems (consequtively, not
- * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
- * for the particular process doing the mmap.
- *
- * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
- * because of lock order against i_mmap_mutex. This means there's a hole in
- * the register vma iteration where a mmap() can happen.
- *
- * Thus uprobe_register() can race with uprobe_mmap() and we can try and
- * install a probe where one is already installed.
+ * allows us to skip the uprobe_mmap if there are no uprobe events active
+ * at this time.  Probably a fine grained per inode count is better?
  */
+#define no_uprobe_events()     RB_EMPTY_ROOT(&uprobes_tree)
 
-/* serialize (un)register */
-static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
-
-#define uprobes_hash(v)                (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
+static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
 
+#define UPROBES_HASH_SZ        13
 /* serialize uprobe->pending_list */
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 #define uprobes_mmap_hash(v)   (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 static struct percpu_rw_semaphore dup_mmap_sem;
 
-/*
- * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
- * events active at this time.  Probably a fine grained per inode count is
- * better?
- */
-static atomic_t uprobe_events = ATOMIC_INIT(0);
-
 /* Have a copy of original instruction */
 #define UPROBE_COPY_INSN       0
-/* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBE_RUN_HANDLER     1
 /* Can skip singlestep */
-#define UPROBE_SKIP_SSTEP      2
+#define UPROBE_SKIP_SSTEP      1
 
 struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
+       struct rw_semaphore     register_rwsem;
        struct rw_semaphore     consumer_rwsem;
-       struct mutex            copy_mutex;     /* TODO: kill me and UPROBE_COPY_INSN */
        struct list_head        pending_list;
        struct uprobe_consumer  *consumers;
        struct inode            *inode;         /* Also hold a ref to inode */
@@ -430,9 +404,6 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
        u = __insert_uprobe(uprobe);
        spin_unlock(&uprobes_treelock);
 
-       /* For now assume that the instruction need not be single-stepped */
-       __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
-
        return u;
 }
 
@@ -452,8 +423,10 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 
        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
+       init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);
-       mutex_init(&uprobe->copy_mutex);
+       /* For now assume that the instruction need not be single-stepped */
+       __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
 
        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
@@ -463,38 +436,17 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
                kfree(uprobe);
                uprobe = cur_uprobe;
                iput(inode);
-       } else {
-               atomic_inc(&uprobe_events);
        }
 
        return uprobe;
 }
 
-static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
-{
-       struct uprobe_consumer *uc;
-
-       if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
-               return;
-
-       down_read(&uprobe->consumer_rwsem);
-       for (uc = uprobe->consumers; uc; uc = uc->next) {
-               if (!uc->filter || uc->filter(uc, current))
-                       uc->handler(uc, regs);
-       }
-       up_read(&uprobe->consumer_rwsem);
-}
-
-/* Returns the previous consumer */
-static struct uprobe_consumer *
-consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
+static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
        down_write(&uprobe->consumer_rwsem);
        uc->next = uprobe->consumers;
        uprobe->consumers = uc;
        up_write(&uprobe->consumer_rwsem);
-
-       return uc->next;
 }
 
 /*
@@ -588,7 +540,8 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                return ret;
 
-       mutex_lock(&uprobe->copy_mutex);
+       /* TODO: move this into _register, until then we abuse this sem. */
+       down_write(&uprobe->consumer_rwsem);
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                goto out;
 
@@ -612,7 +565,30 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 
  out:
-       mutex_unlock(&uprobe->copy_mutex);
+       up_write(&uprobe->consumer_rwsem);
+
+       return ret;
+}
+
+static inline bool consumer_filter(struct uprobe_consumer *uc,
+                                  enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+{
+       return !uc->filter || uc->filter(uc, ctx, mm);
+}
+
+static bool filter_chain(struct uprobe *uprobe,
+                        enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+{
+       struct uprobe_consumer *uc;
+       bool ret = false;
+
+       down_read(&uprobe->consumer_rwsem);
+       for (uc = uprobe->consumers; uc; uc = uc->next) {
+               ret = consumer_filter(uc, ctx, mm);
+               if (ret)
+                       break;
+       }
+       up_read(&uprobe->consumer_rwsem);
 
        return ret;
 }
@@ -624,16 +600,6 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
        bool first_uprobe;
        int ret;
 
-       /*
-        * If probe is being deleted, unregister thread could be done with
-        * the vma-rmap-walk through. Adding a probe now can be fatal since
-        * nobody will be able to cleanup. Also we could be from fork or
-        * mremap path, where the probe might have already been inserted.
-        * Hence behave as if probe already existed.
-        */
-       if (!uprobe->consumers)
-               return 0;
-
        ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
        if (ret)
                return ret;
@@ -658,14 +624,14 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 static int
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-       /* can happen if uprobe_register() fails */
-       if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
-               return 0;
-
        set_bit(MMF_RECALC_UPROBES, &mm->flags);
        return set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
+static inline bool uprobe_is_active(struct uprobe *uprobe)
+{
+       return !RB_EMPTY_NODE(&uprobe->rb_node);
+}
 /*
  * There could be threads that have already hit the breakpoint. They
  * will recheck the current insn and restart if find_uprobe() fails.
@@ -673,12 +639,15 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
+       if (WARN_ON(!uprobe_is_active(uprobe)))
+               return;
+
        spin_lock(&uprobes_treelock);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
+       RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
        iput(uprobe->inode);
        put_uprobe(uprobe);
-       atomic_dec(&uprobe_events);
 }
 
 struct map_info {
@@ -764,8 +733,10 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
        return curr;
 }
 
-static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
+static int
+register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
 {
+       bool is_register = !!new;
        struct map_info *info;
        int err = 0;
 
@@ -794,10 +765,16 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
                        goto unlock;
 
-               if (is_register)
-                       err = install_breakpoint(uprobe, mm, vma, info->vaddr);
-               else
-                       err |= remove_breakpoint(uprobe, mm, info->vaddr);
+               if (is_register) {
+                       /* consult only the "caller", new consumer. */
+                       if (consumer_filter(new,
+                                       UPROBE_FILTER_REGISTER, mm))
+                               err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+               } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
+                       if (!filter_chain(uprobe,
+                                       UPROBE_FILTER_UNREGISTER, mm))
+                               err |= remove_breakpoint(uprobe, mm, info->vaddr);
+               }
 
  unlock:
                up_write(&mm->mmap_sem);
@@ -810,17 +787,23 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
        return err;
 }
 
-static int __uprobe_register(struct uprobe *uprobe)
+static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
-       return register_for_each_vma(uprobe, true);
+       consumer_add(uprobe, uc);
+       return register_for_each_vma(uprobe, uc);
 }
 
-static void __uprobe_unregister(struct uprobe *uprobe)
+static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
-       if (!register_for_each_vma(uprobe, false))
-               delete_uprobe(uprobe);
+       int err;
+
+       if (!consumer_del(uprobe, uc))  /* WARN? */
+               return;
 
+       err = register_for_each_vma(uprobe, NULL);
        /* TODO : cant unregister? schedule a worker thread */
+       if (!uprobe->consumers && !err)
+               delete_uprobe(uprobe);
 }
 
 /*
@@ -845,31 +828,59 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
        struct uprobe *uprobe;
        int ret;
 
-       if (!inode || !uc || uc->next)
-               return -EINVAL;
-
+       /* Racy, just to catch the obvious mistakes */
        if (offset > i_size_read(inode))
                return -EINVAL;
 
-       ret = 0;
-       mutex_lock(uprobes_hash(inode));
+ retry:
        uprobe = alloc_uprobe(inode, offset);
-
-       if (!uprobe) {
-               ret = -ENOMEM;
-       } else if (!consumer_add(uprobe, uc)) {
-               ret = __uprobe_register(uprobe);
-               if (ret) {
-                       uprobe->consumers = NULL;
-                       __uprobe_unregister(uprobe);
-               } else {
-                       set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
-               }
+       if (!uprobe)
+               return -ENOMEM;
+       /*
+        * We can race with uprobe_unregister()->delete_uprobe().
+        * Check uprobe_is_active() and retry if it is false.
+        */
+       down_write(&uprobe->register_rwsem);
+       ret = -EAGAIN;
+       if (likely(uprobe_is_active(uprobe))) {
+               ret = __uprobe_register(uprobe, uc);
+               if (ret)
+                       __uprobe_unregister(uprobe, uc);
        }
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
 
-       mutex_unlock(uprobes_hash(inode));
-       if (uprobe)
-               put_uprobe(uprobe);
+       if (unlikely(ret == -EAGAIN))
+               goto retry;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(uprobe_register);
+
+/*
+ * uprobe_apply - add or remove the breakpoints of an already registered probe.
+ * @inode: the file in which the probe has to be removed.
+ * @offset: offset from the start of the file.
+ * @uc: consumer which wants to add more or remove some breakpoints
+ * @add: add or remove the breakpoints
+ */
+int uprobe_apply(struct inode *inode, loff_t offset,
+                       struct uprobe_consumer *uc, bool add)
+{
+       struct uprobe *uprobe;
+       struct uprobe_consumer *con;
+       int ret = -ENOENT;
+
+       uprobe = find_uprobe(inode, offset);
+       if (!uprobe)
+               return ret;
+
+       down_write(&uprobe->register_rwsem);
+       for (con = uprobe->consumers; con && con != uc ; con = con->next)
+               ;
+       if (con)
+               ret = register_for_each_vma(uprobe, add ? uc : NULL);
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
 
        return ret;
 }
@@ -884,25 +895,42 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 {
        struct uprobe *uprobe;
 
-       if (!inode || !uc)
-               return;
-
        uprobe = find_uprobe(inode, offset);
        if (!uprobe)
                return;
 
-       mutex_lock(uprobes_hash(inode));
+       down_write(&uprobe->register_rwsem);
+       __uprobe_unregister(uprobe, uc);
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
+}
+EXPORT_SYMBOL_GPL(uprobe_unregister);
 
-       if (consumer_del(uprobe, uc)) {
-               if (!uprobe->consumers) {
-                       __uprobe_unregister(uprobe);
-                       clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
-               }
+static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+       int err = 0;
+
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               unsigned long vaddr;
+               loff_t offset;
+
+               if (!valid_vma(vma, false) ||
+                   vma->vm_file->f_mapping->host != uprobe->inode)
+                       continue;
+
+               offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
+               if (uprobe->offset <  offset ||
+                   uprobe->offset >= offset + vma->vm_end - vma->vm_start)
+                       continue;
+
+               vaddr = offset_to_vaddr(vma, uprobe->offset);
+               err |= remove_breakpoint(uprobe, mm, vaddr);
        }
+       up_read(&mm->mmap_sem);
 
-       mutex_unlock(uprobes_hash(inode));
-       if (uprobe)
-               put_uprobe(uprobe);
+       return err;
 }
 
 static struct rb_node *
@@ -979,7 +1007,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
        struct uprobe *uprobe, *u;
        struct inode *inode;
 
-       if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+       if (no_uprobe_events() || !valid_vma(vma, true))
                return 0;
 
        inode = vma->vm_file->f_mapping->host;
@@ -988,9 +1016,14 @@ int uprobe_mmap(struct vm_area_struct *vma)
 
        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
-
+       /*
+        * We can race with uprobe_unregister(), this uprobe can be already
+        * removed. But in this case filter_chain() must return false, all
+        * consumers have gone away.
+        */
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               if (!fatal_signal_pending(current)) {
+               if (!fatal_signal_pending(current) &&
+                   filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
                        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
                        install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
                }
@@ -1025,7 +1058,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-       if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+       if (no_uprobe_events() || !valid_vma(vma, false))
                return;
 
        if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
@@ -1042,22 +1075,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 /* Slot allocation for XOL */
 static int xol_add_vma(struct xol_area *area)
 {
-       struct mm_struct *mm;
-       int ret;
-
-       area->page = alloc_page(GFP_HIGHUSER);
-       if (!area->page)
-               return -ENOMEM;
-
-       ret = -EALREADY;
-       mm = current->mm;
+       struct mm_struct *mm = current->mm;
+       int ret = -EALREADY;
 
        down_write(&mm->mmap_sem);
        if (mm->uprobes_state.xol_area)
                goto fail;
 
        ret = -ENOMEM;
-
        /* Try to map as high as possible, this is only a hint. */
        area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
        if (area->vaddr & ~PAGE_MASK) {
@@ -1073,54 +1098,53 @@ static int xol_add_vma(struct xol_area *area)
        smp_wmb();      /* pairs with get_xol_area() */
        mm->uprobes_state.xol_area = area;
        ret = 0;
-
-fail:
+ fail:
        up_write(&mm->mmap_sem);
-       if (ret)
-               __free_page(area->page);
 
        return ret;
 }
 
-static struct xol_area *get_xol_area(struct mm_struct *mm)
-{
-       struct xol_area *area;
-
-       area = mm->uprobes_state.xol_area;
-       smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
-
-       return area;
-}
-
 /*
- * xol_alloc_area - Allocate process's xol_area.
- * This area will be used for storing instructions for execution out of
- * line.
+ * get_xol_area - Allocate process's xol_area if necessary.
+ * This area will be used for storing instructions for execution out of line.
  *
  * Returns the allocated area or NULL.
  */
-static struct xol_area *xol_alloc_area(void)
+static struct xol_area *get_xol_area(void)
 {
+       struct mm_struct *mm = current->mm;
        struct xol_area *area;
 
+       area = mm->uprobes_state.xol_area;
+       if (area)
+               goto ret;
+
        area = kzalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
-               return NULL;
+               goto out;
 
        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
-
        if (!area->bitmap)
-               goto fail;
+               goto free_area;
+
+       area->page = alloc_page(GFP_HIGHUSER);
+       if (!area->page)
+               goto free_bitmap;
 
        init_waitqueue_head(&area->wq);
        if (!xol_add_vma(area))
                return area;
 
-fail:
+       __free_page(area->page);
+ free_bitmap:
        kfree(area->bitmap);
+ free_area:
        kfree(area);
-
-       return get_xol_area(current->mm);
+ out:
+       area = mm->uprobes_state.xol_area;
+ ret:
+       smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
+       return area;
 }
 
 /*
@@ -1186,33 +1210,26 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
 }
 
 /*
- * xol_get_insn_slot - If was not allocated a slot, then
- * allocate a slot.
+ * xol_get_insn_slot - allocate a slot for xol.
  * Returns the allocated slot address or 0.
  */
-static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
+static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 {
        struct xol_area *area;
        unsigned long offset;
+       unsigned long xol_vaddr;
        void *vaddr;
 
-       area = get_xol_area(current->mm);
-       if (!area) {
-               area = xol_alloc_area();
-               if (!area)
-                       return 0;
-       }
-       current->utask->xol_vaddr = xol_take_insn_slot(area);
+       area = get_xol_area();
+       if (!area)
+               return 0;
 
-       /*
-        * Initialize the slot if xol_vaddr points to valid
-        * instruction slot.
-        */
-       if (unlikely(!current->utask->xol_vaddr))
+       xol_vaddr = xol_take_insn_slot(area);
+       if (unlikely(!xol_vaddr))
                return 0;
 
-       current->utask->vaddr = slot_addr;
-       offset = current->utask->xol_vaddr & ~PAGE_MASK;
+       /* Initialize the slot */
+       offset = xol_vaddr & ~PAGE_MASK;
        vaddr = kmap_atomic(area->page);
        memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
        kunmap_atomic(vaddr);
@@ -1222,7 +1239,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
         */
        flush_dcache_page(area->page);
 
-       return current->utask->xol_vaddr;
+       return xol_vaddr;
 }
 
 /*
@@ -1240,8 +1257,7 @@ static void xol_free_insn_slot(struct task_struct *tsk)
                return;
 
        slot_addr = tsk->utask->xol_vaddr;
-
-       if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
+       if (unlikely(!slot_addr))
                return;
 
        area = tsk->mm->uprobes_state.xol_area;
@@ -1303,33 +1319,48 @@ void uprobe_copy_process(struct task_struct *t)
 }
 
 /*
- * Allocate a uprobe_task object for the task.
- * Called when the thread hits a breakpoint for the first time.
+ * Allocate a uprobe_task object for the task if necessary.
+ * Called when the thread hits a breakpoint.
  *
  * Returns:
  * - pointer to new uprobe_task on success
  * - NULL otherwise
  */
-static struct uprobe_task *add_utask(void)
+static struct uprobe_task *get_utask(void)
 {
-       struct uprobe_task *utask;
-
-       utask = kzalloc(sizeof *utask, GFP_KERNEL);
-       if (unlikely(!utask))
-               return NULL;
-
-       current->utask = utask;
-       return utask;
+       if (!current->utask)
+               current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
+       return current->utask;
 }
 
 /* Prepare to single-step probed instruction out of line. */
 static int
-pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
+pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 {
-       if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
-               return 0;
+       struct uprobe_task *utask;
+       unsigned long xol_vaddr;
+       int err;
+
+       utask = get_utask();
+       if (!utask)
+               return -ENOMEM;
+
+       xol_vaddr = xol_get_insn_slot(uprobe);
+       if (!xol_vaddr)
+               return -ENOMEM;
+
+       utask->xol_vaddr = xol_vaddr;
+       utask->vaddr = bp_vaddr;
+
+       err = arch_uprobe_pre_xol(&uprobe->arch, regs);
+       if (unlikely(err)) {
+               xol_free_insn_slot(current);
+               return err;
+       }
 
-       return -EFAULT;
+       utask->active_uprobe = uprobe;
+       utask->state = UTASK_SSTEP;
+       return 0;
 }
 
 /*
@@ -1391,6 +1422,7 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
                 * This is not strictly accurate, we can race with
                 * uprobe_unregister() and see the already removed
                 * uprobe if delete_uprobe() was not yet called.
+                * Or this uprobe can be filtered out.
                 */
                if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
                        return;
@@ -1452,13 +1484,33 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
        return uprobe;
 }
 
+static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+{
+       struct uprobe_consumer *uc;
+       int remove = UPROBE_HANDLER_REMOVE;
+
+       down_read(&uprobe->register_rwsem);
+       for (uc = uprobe->consumers; uc; uc = uc->next) {
+               int rc = uc->handler(uc, regs);
+
+               WARN(rc & ~UPROBE_HANDLER_MASK,
+                       "bad rc=0x%x from %pf()\n", rc, uc->handler);
+               remove &= rc;
+       }
+
+       if (remove && uprobe->consumers) {
+               WARN_ON(!uprobe_is_active(uprobe));
+               unapply_uprobe(uprobe, current->mm);
+       }
+       up_read(&uprobe->register_rwsem);
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
  */
 static void handle_swbp(struct pt_regs *regs)
 {
-       struct uprobe_task *utask;
        struct uprobe *uprobe;
        unsigned long bp_vaddr;
        int uninitialized_var(is_swbp);
@@ -1483,6 +1535,10 @@ static void handle_swbp(struct pt_regs *regs)
                }
                return;
        }
+
+       /* change it in advance for ->handler() and restart */
+       instruction_pointer_set(regs, bp_vaddr);
+
        /*
         * TODO: move copy_insn/etc into _register and remove this hack.
         * After we hit the bp, _unregister + _register can install the
@@ -1490,32 +1546,16 @@ static void handle_swbp(struct pt_regs *regs)
         */
        smp_rmb(); /* pairs with wmb() in install_breakpoint() */
        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
-               goto restart;
-
-       utask = current->utask;
-       if (!utask) {
-               utask = add_utask();
-               /* Cannot allocate; re-execute the instruction. */
-               if (!utask)
-                       goto restart;
-       }
+               goto out;
 
        handler_chain(uprobe, regs);
        if (can_skip_sstep(uprobe, regs))
                goto out;
 
-       if (!pre_ssout(uprobe, regs, bp_vaddr)) {
-               utask->active_uprobe = uprobe;
-               utask->state = UTASK_SSTEP;
+       if (!pre_ssout(uprobe, regs, bp_vaddr))
                return;
-       }
 
-restart:
-       /*
-        * cannot singlestep; cannot skip instruction;
-        * re-execute the instruction.
-        */
-       instruction_pointer_set(regs, bp_vaddr);
+       /* can_skip_sstep() succeeded, or restart if can't singlestep */
 out:
        put_uprobe(uprobe);
 }
@@ -1609,10 +1649,8 @@ static int __init init_uprobes(void)
 {
        int i;
 
-       for (i = 0; i < UPROBES_HASH_SZ; i++) {
-               mutex_init(&uprobes_mutex[i]);
+       for (i = 0; i < UPROBES_HASH_SZ; i++)
                mutex_init(&uprobes_mmap_mutex[i]);
-       }
 
        if (percpu_init_rwsem(&dup_mmap_sem))
                return -ENOMEM;
index 098f396aa40984d0b49f246b45dd059b88e9acdf..f423c3ef4a82938f4e81165cb70378787b6fbb02 100644 (file)
@@ -919,7 +919,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
        .func = kprobe_ftrace_handler,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
                           (unsigned long)p->addr, 1, 0);
        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 }
-#else  /* !KPROBES_CAN_USE_FTRACE */
+#else  /* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)      arch_prepare_kprobe(p)
 #define arm_kprobe_ftrace(p)   do {} while (0)
 #define disarm_kprobe_ftrace(p)        do {} while (0)
@@ -1414,12 +1414,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
         */
        ftrace_addr = ftrace_location((unsigned long)p->addr);
        if (ftrace_addr) {
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
                /* Given address is not on the instruction boundary */
                if ((unsigned long)p->addr != ftrace_addr)
                        return -EILSEQ;
                p->flags |= KPROBE_FLAG_FTRACE;
-#else  /* !KPROBES_CAN_USE_FTRACE */
+#else  /* !CONFIG_KPROBES_ON_FTRACE */
                return -EINVAL;
 #endif
        }
index 1f391819c42fe6d6539f5ba5e66e6f365b877f6d..dc3384ee874e4ed5a0344395757744adc268c9bc 100644 (file)
@@ -37,9 +37,6 @@ struct profile_hit {
 #define NR_PROFILE_HIT         (PAGE_SIZE/sizeof(struct profile_hit))
 #define NR_PROFILE_GRP         (NR_PROFILE_HIT/PROFILE_GRPSZ)
 
-/* Oprofile timer tick hook */
-static int (*timer_hook)(struct pt_regs *) __read_mostly;
-
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
 
@@ -208,25 +205,6 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
 }
 EXPORT_SYMBOL_GPL(profile_event_unregister);
 
-int register_timer_hook(int (*hook)(struct pt_regs *))
-{
-       if (timer_hook)
-               return -EBUSY;
-       timer_hook = hook;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(register_timer_hook);
-
-void unregister_timer_hook(int (*hook)(struct pt_regs *))
-{
-       WARN_ON(hook != timer_hook);
-       timer_hook = NULL;
-       /* make sure all CPUs see the NULL hook */
-       synchronize_sched();  /* Allow ongoing interrupts to complete. */
-}
-EXPORT_SYMBOL_GPL(unregister_timer_hook);
-
-
 #ifdef CONFIG_SMP
 /*
  * Each cpu has a pair of open-addressed hashtables for pending
@@ -436,8 +414,6 @@ void profile_tick(int type)
 {
        struct pt_regs *regs = get_irq_regs();
 
-       if (type == CPU_PROFILING && timer_hook)
-               timer_hook(regs);
        if (!user_mode(regs) && prof_cpu_mask != NULL &&
            cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
                profile_hit(type, (void *)profile_pc(regs));
index 6cbeaae4406d3bff3ce60394206e041c78e1c0bc..acbd28424d812040e060a82ae2d6f28e156e5fe1 100644 (file)
@@ -712,6 +712,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                                             kiov->iov_len, kiov->iov_base);
 }
 
+/*
+ * This is declared in linux/regset.h and defined in machine-dependent
+ * code.  We put the export here, near the primary machine-neutral use,
+ * to ensure no machine forgets it.
+ */
+EXPORT_SYMBOL_GPL(task_user_regset_view);
 #endif
 
 int ptrace_request(struct task_struct *child, long request,
index 5d89335a485f7480ea4fa3924261e5d659e79ad0..36567564e221195d1a8489207fed82f27c36b85e 100644 (file)
@@ -39,6 +39,9 @@ config HAVE_DYNAMIC_FTRACE
        help
          See Documentation/trace/ftrace-design.txt
 
+config HAVE_DYNAMIC_FTRACE_WITH_REGS
+       bool
+
 config HAVE_FTRACE_MCOUNT_RECORD
        bool
        help
@@ -250,6 +253,16 @@ config FTRACE_SYSCALLS
        help
          Basic tracer to catch the syscall entry and exit events.
 
+config TRACER_SNAPSHOT
+       bool "Create a snapshot trace buffer"
+       select TRACER_MAX_TRACE
+       help
+         Allow tracing users to take snapshot of the current buffer using the
+         ftrace interface, e.g.:
+
+             echo 1 > /sys/kernel/debug/tracing/snapshot
+             cat snapshot
+
 config TRACE_BRANCH_PROFILING
        bool
        select GENERIC_TRACER
@@ -434,6 +447,11 @@ config DYNAMIC_FTRACE
          were made. If so, it runs stop_machine (stops all CPUS)
          and modifies the code to jump over the call to ftrace.
 
+config DYNAMIC_FTRACE_WITH_REGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+
 config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
index c0bd0308741ca1a343a9cbc0b551b4a12438ddd7..71259e2b6b6167985fe0550e8c47de33a770e603 100644 (file)
@@ -147,7 +147,7 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
                return;
 
        local_irq_save(flags);
-       buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+       buf = this_cpu_ptr(bt->msg_data);
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);
index 41473b4ad7a47c4ef41672da3ecf797bb722a89b..ce8c3d68292f701ac7813f4e20a19af79896fa4b 100644 (file)
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)                        \
+       op = rcu_dereference_raw(list);                 \
+       do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)                           \
+       while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+              unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
        return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
 {
-       if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+       int bit;
+
+       bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_GLOBAL_BIT);
-       op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next); /*see above*/
-       };
-       trace_recursion_clear(TRACE_GLOBAL_BIT);
+       } while_for_each_ftrace_op(op);
+
+       trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -221,10 +233,24 @@ static void update_global_ops(void)
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
-           ftrace_global_list->next == &ftrace_list_end)
+           ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
-       else
+               /*
+                * As we are calling the function directly.
+                * If it does not have recursion protection,
+                * the function_trace_op needs to be updated
+                * accordingly.
+                */
+               if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+                       global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+               else
+                       global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
+       } else {
                func = ftrace_global_list_func;
+               /* The list has its own recursion protection. */
+               global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+       }
+
 
        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;
 
-#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
@@ -4090,14 +4116,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        trace_recursion_set(TRACE_CONTROL_BIT);
-       op = rcu_dereference_raw(ftrace_control_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_control_list) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        trace_recursion_clear(TRACE_CONTROL_BIT);
        preempt_enable_notrace();
 }
@@ -4112,27 +4135,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
        struct ftrace_ops *op;
+       int bit;
 
        if (function_trace_stop)
                return;
 
-       if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_INTERNAL_BIT);
        /*
         * Some of the ops may be dynamically allocated,
         * they must be freed after a synchronize_sched().
         */
        preempt_disable_notrace();
-       op = rcu_dereference_raw(ftrace_ops_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        preempt_enable_notrace();
-       trace_recursion_clear(TRACE_INTERNAL_BIT);
+       trace_clear_recursion(bit);
 }
 
 /*
@@ -4143,8 +4165,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * Archs are to support both the regs and ftrace_ops at the same time.
  * If they support ftrace_ops, it is assumed they support regs.
  * If call backs want to use regs, they must either check for regs
- * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
- * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
+ * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
+ * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
  * An architecture can pass partial regs with ftrace_ops and still
  * set the ARCH_SUPPORT_FTARCE_OPS.
  */
index ce8514feedcdf29c181dda4972eb7a53cd0df04e..7244acde77b0a9a5670d401fc505c64c5d8b06c0 100644 (file)
@@ -3,8 +3,10 @@
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
+#include <linux/ftrace_event.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
+#include <linux/trace_seq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
@@ -21,7 +23,6 @@
 #include <linux/fs.h>
 
 #include <asm/local.h>
-#include "trace.h"
 
 static void update_pages_handler(struct work_struct *work);
 
@@ -2432,41 +2433,76 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #ifdef CONFIG_TRACING
 
-#define TRACE_RECURSIVE_DEPTH 16
+/*
+ * The lock and unlock are done within a preempt disable section.
+ * The current_context per_cpu variable can only be modified
+ * by the current task between lock and unlock. But it can
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
+ *
+ *  bit 0 =  NMI context
+ *  bit 1 =  IRQ context
+ *  bit 2 =  SoftIRQ context
+ *  bit 3 =  normal context.
+ *
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
+ */
+static DEFINE_PER_CPU(unsigned int, current_context);
 
-/* Keep this code out of the fast path cache */
-static noinline void trace_recursive_fail(void)
+static __always_inline int trace_recursive_lock(void)
 {
-       /* Disable all tracing before we do anything else */
-       tracing_off_permanent();
-
-       printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
-                   "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-                   trace_recursion_buffer(),
-                   hardirq_count() >> HARDIRQ_SHIFT,
-                   softirq_count() >> SOFTIRQ_SHIFT,
-                   in_nmi());
-
-       WARN_ON_ONCE(1);
-}
+       unsigned int val = this_cpu_read(current_context);
+       int bit;
 
-static inline int trace_recursive_lock(void)
-{
-       trace_recursion_inc();
+       if (in_interrupt()) {
+               if (in_nmi())
+                       bit = 0;
+               else if (in_irq())
+                       bit = 1;
+               else
+                       bit = 2;
+       } else
+               bit = 3;
 
-       if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
-               return 0;
+       if (unlikely(val & (1 << bit)))
+               return 1;
 
-       trace_recursive_fail();
+       val |= (1 << bit);
+       this_cpu_write(current_context, val);
 
-       return -1;
+       return 0;
 }
 
-static inline void trace_recursive_unlock(void)
+static __always_inline void trace_recursive_unlock(void)
 {
-       WARN_ON_ONCE(!trace_recursion_buffer());
+       unsigned int val = this_cpu_read(current_context);
 
-       trace_recursion_dec();
+       val--;
+       val &= this_cpu_read(current_context);
+       this_cpu_write(current_context, val);
 }
 
 #else
@@ -3066,6 +3102,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
 
+/**
+ * ring_buffer_read_events_cpu - get the number of events successfully read
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of events read
+ */
+unsigned long
+ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+
+       if (!cpumask_test_cpu(cpu, buffer->cpumask))
+               return 0;
+
+       cpu_buffer = buffer->buffers[cpu];
+       return cpu_buffer->read;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
+
 /**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
@@ -3425,7 +3479,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)