Merge branch 'x86/microcode' into perf/core
author Ingo Molnar <mingo@kernel.org>
Thu, 5 Jul 2012 19:13:57 +0000 (21:13 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 5 Jul 2012 19:13:57 +0000 (21:13 +0200)
Merge this branch because we want to rely on the newer (and saner)
microcode loading and checking facilities.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
176 files changed:
Documentation/ABI/testing/sysfs-block-rssd
Documentation/device-mapper/verity.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
MAINTAINERS
arch/arm/kernel/vmlinux.lds.S
arch/arm/mm/mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/xmon/xmon.c
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/uprobes.h
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_intel_uncore.h [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process_64.c
arch/x86/kernel/uprobes.c
arch/x86/lib/msr-reg-export.c
arch/x86/lib/msr-reg.S
arch/x86/vdso/vdso32-setup.c
arch/x86/xen/enlighten.c
block/blk-cgroup.c
block/blk-core.c
block/blk-timeout.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/clk/clk.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/si.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wlcore/Kconfig
fs/splice.c
include/linux/blkdev.h
include/linux/ftrace_event.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/sched.h
include/linux/splice.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
kernel/events/core.c
kernel/events/uprobes.c
kernel/relay.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_output.c
mm/shmem.c
net/core/dev.c
net/core/skbuff.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nfnetlink.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
security/security.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2200.c
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Makefile
tools/perf/bench/mem-memcpy.c
tools/perf/bench/mem-memset.c
tools/perf/builtin-bench.c
tools/perf/builtin-evlist.c
tools/perf/builtin-kmem.c
tools/perf/builtin-lock.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/config/feature-tests.mak
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/browser.c
tools/perf/ui/gtk/gtk.h
tools/perf/ui/gtk/setup.c
tools/perf/ui/gtk/util.c [new file with mode: 0644]
tools/perf/ui/tui/setup.c
tools/perf/ui/tui/util.c [new file with mode: 0644]
tools/perf/ui/util.c
tools/perf/ui/util.h
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/hist.h
tools/perf/util/map.h
tools/perf/util/parse-events-test.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/string.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/top.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event-scripting.c
tools/perf/util/trace-event.h
tools/perf/util/util.h

index 679ce354312281846e4ef2decee0689b0775c985..beef30c046b0d3181bfc353d15704bb4bdaf3da6 100644 (file)
@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure
index 32e48797a14f80de6ebff7a441e489021bfce58a..9884681535ee36bf03a7d7baaa54abb36360d53d 100644 (file)
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
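
As a back-of-the-envelope check of the hash-tree layout described above, the
following sketch (not kernel code; it assumes format version 1 with sha256,
whose 32-byte digest is already a power of two) computes the fanout, depth and
hash-block count for the documented example of 32768 data blocks and 4096-byte
blocks:

  #include <stdio.h>

  int main(void)
  {
          unsigned long num_blocks  = 32768; /* data blocks on the data device */
          unsigned long block_size  = 4096;  /* hash block size in bytes */
          unsigned long digest_size = 32;    /* sha256 */
          unsigned long fanout = block_size / digest_size; /* 128 digests/block */
          unsigned long n = num_blocks, total = 0;
          int depth = 0;

          /* walk up one level at a time until a single root block remains */
          while (n > 1) {
                  n = (n + fanout - 1) / fanout;
                  total += n;
                  depth++;
          }
          printf("fanout=%lu depth=%d hash_blocks=%lu\n", fanout, depth, total);
          return 0;
  }

For the example parameters this prints fanout=128 depth=3 hash_blocks=259,
i.e. 256 leaf hash blocks, two intermediary blocks and one root block.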
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..cb705ec
--- /dev/null
@@ -0,0 +1,50 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
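
A minimal user-space sketch of the mechanism (assuming a 3.5+ kernel;
PR_SET_NO_NEW_PRIVS is defined as 38, and pre-3.5 userspace headers may not
have it yet):

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/prctl.h>

  #ifndef PR_SET_NO_NEW_PRIVS
  #define PR_SET_NO_NEW_PRIVS 38          /* for older userspace headers */
  #endif

  int main(void)
  {
          /* once set, the bit is inherited across fork/clone/execve
           * and can never be cleared again */
          if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
                  perror("prctl(PR_SET_NO_NEW_PRIVS)");
                  return 1;
          }
          /* the execve() below can no longer grant setuid/setgid/fcap
           * privileges to the new program */
          execlp("id", "id", (char *)NULL);
          perror("execlp");
          return 1;
  }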
index eb22272b2116c70cb6a6af438c24ab9f2edad9fb..03df1d15ebf3d9de18052bdf797342b261922672 100644 (file)
@@ -4654,8 +4654,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
index 43a31fb06318dc0a1213827a5069b73cac19f5a6..36ff15bbfdd4961afecfef9ea292c6445e9b3836 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index e5dad60b558b468315294b2c8b95c70193b6f74d..cf4528d5177448fb79cb5dc6689c873c2dbddb4a 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
index a84aafce2a129e311a0943be0975db3823b88402..a1044f43becd380cdc7216e082fbf267b97a3fae 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
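
The one-character fix above is easy to miss: sldi (an extended mnemonic for
rldicr) shifts by an immediate, while the shift amount wanted here is the
thread id just loaded from VCPU_PTID into r4, which is only known at run
time; sld takes its shift amount from a register. In C terms, the intended
operation is roughly:

  /* build a single-bit IPI mask for this hardware thread; ptid is a
   * run-time value, so the shift amount must come from a register */
  u64 mask = 1ULL << ptid;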
index 0f3ab06d222260c63d5866e6d6b7ab0ac643e761..eab3492a45c5c5244eca42d58354cf6d8a092836 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index 084ef95274cd78ceb51b1ea7a208a7a5e486199a..813ed103f45edc0ac13042e8c18a922b909a8710 100644 (file)
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
        return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-       u32 gprs[8] = { 0 };
-       int err;
-
-       gprs[1] = msr;
-       gprs[7] = 0x9c5a203a;
-
-       err = native_rdmsr_safe_regs(gprs);
-
-       *p = gprs[0] | ((u64)gprs[2] << 32);
-
-       return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-       u32 gprs[8] = { 0 };
-
-       gprs[0] = (u32)val;
-       gprs[1] = msr;
-       gprs[2] = val >> 32;
-       gprs[7] = 0x9c5a203a;
-
-       return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-       return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-       return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low)                                            \
        ((low) = (u32)__native_read_tsc())
 
@@ -237,6 +200,8 @@ do {                                                        \
        (high) = (u32)(_l >> 32);                       \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
 #define rdtscp(low, high, aux)                                 \
 do {                                                            \
        unsigned long long _val = native_read_tscp(&(aux));     \
@@ -248,8 +213,7 @@ do {                                                            \
 
 #endif /* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),                \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),            \
                                             (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
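
The net effect of this hunk set: the AMD K8-specific rdmsrl_amd_safe() and
wrmsrl_amd_safe() pair moves out of the generic header (it reappears in the
arch/x86/kernel/cpu/amd.c hunks below), checking_wrmsrl() is renamed to
wrmsrl_safe(), and a new rdpmcl() accessor reads performance counters via
rdpmc. A hedged sketch of how the renamed helpers compose — probe_counter()
is a hypothetical helper, mirroring the check_hw_exists() and
x86_perf_event_update() call sites further down:

  static int probe_counter(unsigned int msr, int pmc_idx)
  {
          u64 val = 0xabcdULL, new;

          if (wrmsrl_safe(msr, val))      /* 0 on success, -EFAULT on a faulting MSR */
                  return -EIO;
          if (rdmsrl_safe(msr, &new) || new != val)
                  return -EIO;
          rdpmcl(pmc_idx, new);           /* read the PMC with rdpmc instead of rdmsr */
          return 0;
  }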
index 6cbbabf52707f9d492a3e3d6687c37b24369cdfb..0b47ddb6f00b300366253a343c4a78424a8fc280 100644 (file)
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-       return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-       return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)                 \
 do {                                           \
@@ -176,9 +166,6 @@ do {                                                \
        _err;                                   \
 })
 
-#define rdmsr_safe_regs(regs)  paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)  paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
        int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
        *p = paravirt_read_msr(msr, &err);
        return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-       u32 gprs[8] = { 0 };
-       int err;
-
-       gprs[1] = msr;
-       gprs[7] = 0x9c5a203a;
-
-       err = paravirt_rdmsr_regs(gprs);
-
-       *p = gprs[0] | ((u64)gprs[2] << 32);
-
-       return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-       u32 gprs[8] = { 0 };
-
-       gprs[0] = (u32)val;
-       gprs[1] = msr;
-       gprs[2] = val >> 32;
-       gprs[7] = 0x9c5a203a;
-
-       return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -252,6 +213,8 @@ do {                                                \
        high = _l >> 32;                        \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
index 8e8b9a4987ee0225d0a7e38504436f53f4c81d30..8613cbb7ba41e63d8626e2bae1356793ca279a1d 100644 (file)
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
        /* MSR, PMC and TSR operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
-       int (*rdmsr_regs)(u32 *regs);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-       int (*wrmsr_regs)(u32 *regs);
 
        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
index 1e9bed14f7aecd9bfe6bf7ad2df2f8772cd5d5f2..f3971bbcd1de3ee52ddda97b8bd02fbcc815aef1 100644 (file)
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
 #endif
 };
 
-extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
 extern int  arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern int  arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
index 6ab6aa2fdfdd21ef0192b0eb11a99c1a0531f8ab..bac4c3804cc7530e179630e87facca153576d026 100644 (file)
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS)             += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD)              += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_uncore.o
 endif
 
 obj-$(CONFIG_X86_MCE)                  += mcheck/
index 146bb6218eec3daa3cdbe0472c04bcfac7500e8a..9d92e19039f05f84b4c172de7c1c243a4dad72e0 100644 (file)
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       u32 gprs[8] = { 0 };
+       int err;
+
+       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+       gprs[1] = msr;
+       gprs[7] = 0x9c5a203a;
+
+       err = rdmsr_safe_regs(gprs);
+
+       *p = gprs[0] | ((u64)gprs[2] << 32);
+
+       return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       u32 gprs[8] = { 0 };
+
+       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+       gprs[0] = (u32)val;
+       gprs[1] = msr;
+       gprs[2] = val >> 32;
+       gprs[7] = 0x9c5a203a;
+
+       return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  *     B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
                u64 val;
 
-               if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+               if (!rdmsrl_safe(0xc0011005, &val)) {
                        val |= 1ULL << 54;
-                       wrmsrl_amd_safe(0xc0011005, val);
+                       wrmsrl_safe(0xc0011005, val);
                        rdmsrl(0xc0011005, val);
                        if (val & (1ULL << 54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
                if (err == 0) {
                        mask |= (1 << 10);
-                       checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+                       wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
 
index 6b9333b429ba1910637bf29c63bbe0712064022e..5bbc082c47ad8d1fef9950c5dda873f1fdd5975c 100644 (file)
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
                index_max = msr_range_array[i].max;
 
                for (index = index_min; index < index_max; index++) {
-                       if (rdmsrl_amd_safe(index, &val))
+                       if (rdmsrl_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
index c4706cf9c011d8fd068d205ed0b669142c112400..e677d9923f4f1d483721b3a0f1aa566d2dfbbfa7 100644 (file)
 
 #include "perf_event.h"
 
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val)                                       \
-do {                                                           \
-       trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
-                       (unsigned long)(val));                  \
-       native_write_msr((msr), (u32)((u64)(val)),              \
-                       (u32)((u64)(val) >> 32));               \
-} while (0)
-#endif
-
 struct x86_pmu x86_pmu __read_mostly;
 
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         */
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdmsrl(hwc->event_base, new_raw_count);
+       rdpmcl(hwc->event_base_rdpmc, new_raw_count);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
@@ -222,7 +211,7 @@ static bool check_hw_exists(void)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+       ret = wrmsrl_safe(x86_pmu_event_addr(0), val);
        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;
@@ -637,7 +626,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        c = sched->constraints[sched->state.event];
 
        /* Prefer fixed purpose counters */
-       if (x86_pmu.num_counters_fixed) {
+       if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
                idx = X86_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
@@ -704,8 +693,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-static int perf_assign_events(struct event_constraint **constraints, int n,
-                             int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int *assign)
 {
        struct perf_sched sched;
 
@@ -830,9 +819,11 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
+               hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
+               hwc->event_base_rdpmc = hwc->idx;
        }
 }
 
@@ -1649,7 +1640,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
 {
-       unsigned long val = simple_strtoul(buf, NULL, 0);
+       unsigned long val;
+       ssize_t ret;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
 
        if (!!val != !!x86_pmu.attr_rdpmc) {
                x86_pmu.attr_rdpmc = !!val;
@@ -1863,7 +1859,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
-               if (user_mode(regs))
+               if (!kernel_ip(regs->ip))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
index 7241e2fc3c17c87b766c35f8d412b8692436b825..83238f2a12b2e539c7991d7068ae36434eb2efcc 100644 (file)
 
 #include <linux/perf_event.h>
 
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)                                               \
+do {                                                                   \
+       unsigned int _msr = (msr);                                      \
+       u64 _val = (val);                                               \
+       trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),         \
+                       (unsigned long long)(_val));                    \
+       native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));       \
+} while (0)
+#endif
+
 /*
  *          |   NHM/WSM    |      SNB     |
  * register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS                4
+#define MAX_PEBS_EVENTS                8
 
 /*
  * A debug store configuration.
@@ -366,6 +378,7 @@ struct x86_pmu {
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
+       int             max_pebs_events;
 
        /*
         * Intel LBR
@@ -468,6 +481,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
 
 void x86_pmu_stop(struct perf_event *event, int flags);
index 187c294bc6583424e8613df62a883740ab8fca1a..8408e37f5fa4f6c6dfc997e3a16717add4f7ffd5 100644 (file)
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
        printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
+               wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+               wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
        if (ds)
                ds->bts_index = ds->bts_buffer_base;
@@ -1800,6 +1800,8 @@ __init int intel_pmu_init(void)
        x86_pmu.events_maskl            = ebx.full;
        x86_pmu.events_mask_len         = eax.split.mask_length;
 
+       x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
index 35e2192df9f4ae078ae8ecff742f72f30881816b..026373edef7f2204ace094649cdc7f43735d72ce 100644 (file)
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
-       WARN_ON_ONCE(n > 1);
+       WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
        at += n - 1;
 
        __intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
-       WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+       WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
        for ( ; at < top; at++) {
-               for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+               for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
                        event = cpuc->events[bit];
                        if (!test_bit(bit, cpuc->active_mask))
                                continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        break;
                }
 
-               if (!event || bit >= MAX_PEBS_EVENTS)
+               if (!event || bit >= x86_pmu.max_pebs_events)
                        continue;
 
                __intel_pmu_pebs_event(event, iregs, at);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
new file mode 100644 (file)
index 0000000..6f43f95
--- /dev/null
@@ -0,0 +1,1703 @@
+#include "perf_event_intel_uncore.h"
+
+static struct intel_uncore_type *empty_uncore[] = { NULL, };
+static struct intel_uncore_type **msr_uncores = empty_uncore;
+static struct intel_uncore_type **pci_uncores = empty_uncore;
+/* pci bus to socket mapping */
+static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+
+static DEFINE_RAW_SPINLOCK(uncore_box_lock);
+
+/* mask of cpus that collect uncore events */
+static cpumask_t uncore_cpu_mask;
+
+/* constraint for the fixed counter */
+static struct event_constraint constraint_fixed =
+       EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
+DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
+DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
+
+/* Sandy Bridge-EP uncore support */
+static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       int box_ctl = uncore_pci_box_ctl(box);
+       u32 config;
+
+       pci_read_config_dword(pdev, box_ctl, &config);
+       config |= SNBEP_PMON_BOX_CTL_FRZ;
+       pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       int box_ctl = uncore_pci_box_ctl(box);
+       u32 config;
+
+       pci_read_config_dword(pdev, box_ctl, &config);
+       config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+       pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base, hwc->config |
+                               SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+       u64 count;
+
+       pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+       pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+       return count;
+}
+
+static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
+                               SNBEP_PMON_BOX_CTL_INT);
+}
+
+static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+       u64 config;
+       unsigned msr;
+
+       msr = uncore_msr_box_ctl(box);
+       if (msr) {
+               rdmsrl(msr, config);
+               config |= SNBEP_PMON_BOX_CTL_FRZ;
+               wrmsrl(msr, config);
+               return;
+       }
+}
+
+static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       u64 config;
+       unsigned msr;
+
+       msr = uncore_msr_box_ctl(box);
+       if (msr) {
+               rdmsrl(msr, config);
+               config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+               wrmsrl(msr, config);
+               return;
+       }
+}
+
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 count;
+
+       rdmsrl(hwc->event_base, count);
+       return count;
+}
+
+static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       unsigned msr = uncore_msr_box_ctl(box);
+       if (msr)
+               wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static struct attribute *snbep_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       NULL,
+};
+
+static struct attribute *snbep_uncore_ubox_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh5.attr,
+       NULL,
+};
+
+static struct attribute *snbep_uncore_pcu_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_occ_sel.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh5.attr,
+       &format_attr_occ_invert.attr,
+       &format_attr_occ_edge.attr,
+       NULL,
+};
+
+static struct uncore_event_desc snbep_uncore_imc_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0xff"),
+       INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
+       INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+       { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
+       INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
+       { /* end: all zeroes */ },
+};
+
+static struct attribute_group snbep_uncore_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_ubox_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_pcu_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_pcu_formats_attr,
+};
+
+static struct intel_uncore_ops snbep_uncore_msr_ops = {
+       .init_box       = snbep_uncore_msr_init_box,
+       .disable_box    = snbep_uncore_msr_disable_box,
+       .enable_box     = snbep_uncore_msr_enable_box,
+       .disable_event  = snbep_uncore_msr_disable_event,
+       .enable_event   = snbep_uncore_msr_enable_event,
+       .read_counter   = snbep_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_ops snbep_uncore_pci_ops = {
+       .init_box       = snbep_uncore_pci_init_box,
+       .disable_box    = snbep_uncore_pci_disable_box,
+       .enable_box     = snbep_uncore_pci_enable_box,
+       .disable_event  = snbep_uncore_pci_disable_event,
+       .enable_event   = snbep_uncore_pci_enable_event,
+       .read_counter   = snbep_uncore_pci_read_counter,
+};
+
+static struct event_constraint snbep_uncore_cbox_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
+       UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+       EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snbep_uncore_ubox = {
+       .name           = "ubox",
+       .num_counters   = 2,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 44,
+       .fixed_ctr_bits = 48,
+       .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
+       .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
+       .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+       .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+       .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+       .ops            = &snbep_uncore_msr_ops,
+       .format_group   = &snbep_uncore_ubox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_cbox = {
+       .name           = "cbox",
+       .num_counters   = 4,
+       .num_boxes      = 8,
+       .perf_ctr_bits  = 44,
+       .event_ctl      = SNBEP_C0_MSR_PMON_CTL0,
+       .perf_ctr       = SNBEP_C0_MSR_PMON_CTR0,
+       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
+       .box_ctl        = SNBEP_C0_MSR_PMON_BOX_CTL,
+       .msr_offset     = SNBEP_CBO_MSR_OFFSET,
+       .constraints    = snbep_uncore_cbox_constraints,
+       .ops            = &snbep_uncore_msr_ops,
+       .format_group   = &snbep_uncore_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_pcu = {
+       .name           = "pcu",
+       .num_counters   = 4,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 48,
+       .perf_ctr       = SNBEP_PCU_MSR_PMON_CTR0,
+       .event_ctl      = SNBEP_PCU_MSR_PMON_CTL0,
+       .event_mask     = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+       .box_ctl        = SNBEP_PCU_MSR_PMON_BOX_CTL,
+       .ops            = &snbep_uncore_msr_ops,
+       .format_group   = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *snbep_msr_uncores[] = {
+       &snbep_uncore_ubox,
+       &snbep_uncore_cbox,
+       &snbep_uncore_pcu,
+       NULL,
+};
+
+#define SNBEP_UNCORE_PCI_COMMON_INIT()                         \
+       .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
+       .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
+       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
+       .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
+       .ops            = &snbep_uncore_pci_ops,                \
+       .format_group   = &snbep_uncore_format_group
+
+static struct intel_uncore_type snbep_uncore_ha = {
+       .name           = "ha",
+       .num_counters   = 4,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 48,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_imc = {
+       .name           = "imc",
+       .num_counters   = 4,
+       .num_boxes      = 4,
+       .perf_ctr_bits  = 48,
+       .fixed_ctr_bits = 48,
+       .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+       .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+       .event_descs    = snbep_uncore_imc_events,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_qpi = {
+       .name           = "qpi",
+       .num_counters   = 4,
+       .num_boxes      = 2,
+       .perf_ctr_bits  = 48,
+       .event_descs    = snbep_uncore_qpi_events,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+
+static struct intel_uncore_type snbep_uncore_r2pcie = {
+       .name           = "r2pcie",
+       .num_counters   = 4,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 44,
+       .constraints    = snbep_uncore_r2pcie_constraints,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_r3qpi = {
+       .name           = "r3qpi",
+       .num_counters   = 3,
+       .num_boxes      = 2,
+       .perf_ctr_bits  = 44,
+       .constraints    = snbep_uncore_r3qpi_constraints,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type *snbep_pci_uncores[] = {
+       &snbep_uncore_ha,
+       &snbep_uncore_imc,
+       &snbep_uncore_qpi,
+       &snbep_uncore_r2pcie,
+       &snbep_uncore_r3qpi,
+       NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
+       { /* Home Agent */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
+               .driver_data = (unsigned long)&snbep_uncore_ha,
+       },
+       { /* MC Channel 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 2 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 3 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* QPI Port 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
+               .driver_data = (unsigned long)&snbep_uncore_qpi,
+       },
+       { /* QPI Port 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
+               .driver_data = (unsigned long)&snbep_uncore_qpi,
+       },
+       { /* P2PCIe */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
+               .driver_data = (unsigned long)&snbep_uncore_r2pcie,
+       },
+       { /* R3QPI Link 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
+               .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+       },
+       { /* R3QPI Link 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
+               .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+       },
+       { /* end: all zeroes */ }
+};
+
+static struct pci_driver snbep_uncore_pci_driver = {
+       .name           = "snbep_uncore",
+       .id_table       = snbep_uncore_pci_ids,
+};
+
+/*
+ * build pci bus to socket mapping
+ */
+static void snbep_pci2phy_map_init(void)
+{
+       struct pci_dev *ubox_dev = NULL;
+       int i, bus, nodeid;
+       u32 config;
+
+       while (1) {
+               /* find the UBOX device */
+               ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                       PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
+                                       ubox_dev);
+               if (!ubox_dev)
+                       break;
+               bus = ubox_dev->bus->number;
+               /* get the Node ID of the local socket */
+               pci_read_config_dword(ubox_dev, 0x40, &config);
+               nodeid = config;
+               /* get the Node ID mapping */
+               pci_read_config_dword(ubox_dev, 0x54, &config);
+               /*
+                * Each three-bit field in the Node ID mapping register
+                * maps a node to a bus: if field i holds the local Node
+                * ID read above, this bus belongs to physical package i.
+                */
+               for (i = 0; i < 8; i++) {
+                       if (nodeid == ((config >> (3 * i)) & 0x7)) {
+                               pcibus_to_physid[bus] = i;
+                               break;
+                       }
+               }
+       }
+}
+/* end of Sandy Bridge-EP uncore support */
+
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
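+       /*
+        * Generic counters take their event select from hwc->config;
+        * the fixed counter's event is hardwired, so only the enable
+        * bit is written for it.
+        */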
+       if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+               wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+       else
+               wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       u64 count;
+       rdmsrl(event->hw.event_base, count);
+       return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->pmu_idx == 0) {
+               wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+                       SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+       }
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask5.attr,
+       NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+       .name = "format",
+       .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+       .init_box       = snb_uncore_msr_init_box,
+       .disable_event  = snb_uncore_msr_disable_event,
+       .enable_event   = snb_uncore_msr_enable_event,
+       .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+       EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+       .name           = "cbox",
+       .num_counters   = 2,
+       .num_boxes      = 4,
+       .perf_ctr_bits  = 44,
+       .fixed_ctr_bits = 48,
+       .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
+       .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
+       .fixed_ctr      = SNB_UNC_FIXED_CTR,
+       .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
+       .single_fixed   = 1,
+       .event_mask     = SNB_UNC_RAW_EVENT_MASK,
+       .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
+       .constraints    = snb_uncore_cbox_constraints,
+       .ops            = &snb_uncore_msr_ops,
+       .format_group   = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+       &snb_uncore_cbox,
+       NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+               NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+               wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+       else
+               wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask8.attr,
+       NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+       .name = "format",
+       .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0xff"),
+       INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
+       INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+       .disable_box    = nhm_uncore_msr_disable_box,
+       .enable_box     = nhm_uncore_msr_enable_box,
+       .disable_event  = snb_uncore_msr_disable_event,
+       .enable_event   = nhm_uncore_msr_enable_event,
+       .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
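+       /* empty name: uncore_pmu_register() names this pmu just "uncore" */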
+       .name           = "",
+       .num_counters   = 8,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 48,
+       .fixed_ctr_bits = 48,
+       .event_ctl      = NHM_UNC_PERFEVTSEL0,
+       .perf_ctr       = NHM_UNC_UNCORE_PMC0,
+       .fixed_ctr      = NHM_UNC_FIXED_CTR,
+       .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
+       .event_mask     = NHM_UNC_RAW_EVENT_MASK,
+       .event_descs    = nhm_uncore_events,
+       .ops            = &nhm_uncore_msr_ops,
+       .format_group   = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+       &nhm_uncore,
+       NULL,
+};
+/* end of Nehalem uncore support */
+
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+                               struct perf_event *event, int idx)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       hwc->idx = idx;
+       hwc->last_tag = ++box->tags[idx];
+
+       if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+               hwc->event_base = uncore_fixed_ctr(box);
+               hwc->config_base = uncore_fixed_ctl(box);
+               return;
+       }
+
+       hwc->config_base = uncore_event_ctl(box, hwc->idx);
+       hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
+}
+
+static void uncore_perf_event_update(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       u64 prev_count, new_count, delta;
+       int shift;
+
+       if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+               shift = 64 - uncore_fixed_ctr_bits(box);
+       else
+               shift = 64 - uncore_perf_ctr_bits(box);
+
+       /* the hrtimer might modify the previous event value */
+again:
+       prev_count = local64_read(&event->hw.prev_count);
+       new_count = uncore_read_counter(box, event);
+       if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+               goto again;
+
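+       /*
+        * Shift both values up so the counter's most significant bit
+        * lands in bit 63, subtract, then shift back: the delta comes
+        * out modulo the counter width, which absorbs a single
+        * wraparound between two reads.
+        */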
+       delta = (new_count << shift) - (prev_count << shift);
+       delta >>= shift;
+
+       local64_add(delta, &event->count);
+}
+
+/*
+ * The overflow interrupt is unavailable for SandyBridge-EP and broken
+ * on SandyBridge, so an hrtimer is used to poll the counters
+ * periodically instead. At the 60 s polling interval even a 44-bit
+ * counter would need roughly 3 * 10^11 events/sec to wrap between
+ * two polls.
+ */
+static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
+{
+       struct intel_uncore_box *box;
+       unsigned long flags;
+       int bit;
+
+       box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
+       if (!box->n_active || box->cpu != smp_processor_id())
+               return HRTIMER_NORESTART;
+       /*
+        * Disable local interrupts to prevent uncore_pmu_event_start/stop
+        * from interrupting the update process.
+        */
+       local_irq_save(flags);
+
+       for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
+               uncore_perf_event_update(box, box->events[bit]);
+
+       local_irq_restore(flags);
+
+       hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+       return HRTIMER_RESTART;
+}
+
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
+{
+       __hrtimer_start_range_ns(&box->hrtimer,
+                       ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+                       HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
+{
+       hrtimer_cancel(&box->hrtimer);
+}
+
+static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
+{
+       hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       box->hrtimer.function = uncore_pmu_hrtimer;
+}
+
+struct intel_uncore_box *uncore_alloc_box(int cpu)
+{
+       struct intel_uncore_box *box;
+
+       box = kmalloc_node(sizeof(*box), GFP_KERNEL | __GFP_ZERO,
+                          cpu_to_node(cpu));
+       if (!box)
+               return NULL;
+
+       uncore_pmu_init_hrtimer(box);
+       atomic_set(&box->refcnt, 1);
+       box->cpu = -1;
+       box->phys_id = -1;
+
+       return box;
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+       struct intel_uncore_box *box;
+
+       box = *per_cpu_ptr(pmu->box, cpu);
+       if (box)
+               return box;
+
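+       /*
+        * First access on this cpu: look up the package-shared box by
+        * physical package id and cache the pointer per cpu.
+        */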
+       raw_spin_lock(&uncore_box_lock);
+       list_for_each_entry(box, &pmu->box_list, list) {
+               if (box->phys_id == topology_physical_package_id(cpu)) {
+                       atomic_inc(&box->refcnt);
+                       *per_cpu_ptr(pmu->box, cpu) = box;
+                       break;
+               }
+       }
+       raw_spin_unlock(&uncore_box_lock);
+
+       return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+       return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+       /*
+        * The perf core schedules events per cpu, while uncore events
+        * are collected by one designated cpu inside each physical
+        * package.
+        */
+       return uncore_pmu_to_box(uncore_event_to_pmu(event),
+                                smp_processor_id());
+}
+
+static int uncore_collect_events(struct intel_uncore_box *box,
+                               struct perf_event *leader, bool dogrp)
+{
+       struct perf_event *event;
+       int n, max_count;
+
+       max_count = box->pmu->type->num_counters;
+       if (box->pmu->type->fixed_ctl)
+               max_count++;
+
+       if (box->n_events >= max_count)
+               return -EINVAL;
+
+       n = box->n_events;
+       box->event_list[n] = leader;
+       n++;
+       if (!dogrp)
+               return n;
+
+       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+               if (event->state <= PERF_EVENT_STATE_OFF)
+                       continue;
+
+               if (n >= max_count)
+                       return -EINVAL;
+
+               box->event_list[n] = event;
+               n++;
+       }
+       return n;
+}
+
+static struct event_constraint *
+uncore_event_constraint(struct intel_uncore_type *type,
+                       struct perf_event *event)
+{
+       struct event_constraint *c;
+
+       if (event->hw.config == ~0ULL)
+               return &constraint_fixed;
+
+       if (type->constraints) {
+               for_each_event_constraint(c, type->constraints) {
+                       if ((event->hw.config & c->cmask) == c->code)
+                               return c;
+               }
+       }
+
+       return &type->unconstrainted;
+}
+
+static int uncore_assign_events(struct intel_uncore_box *box,
+                               int assign[], int n)
+{
+       unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+       struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
+       int i, ret, wmin, wmax;
+       struct hw_perf_event *hwc;
+
+       bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
+
+       for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
+               c = uncore_event_constraint(box->pmu->type,
+                               box->event_list[i]);
+               constraints[i] = c;
+               wmin = min(wmin, c->weight);
+               wmax = max(wmax, c->weight);
+       }
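+       /*
+        * wmin/wmax bound the constraint weights; the slow path assigns
+        * the most constrained (lowest-weight) events first.
+        */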
+
+       /* fastpath, try to reuse previous register */
+       for (i = 0; i < n; i++) {
+               hwc = &box->event_list[i]->hw;
+               c = constraints[i];
+
+               /* never assigned */
+               if (hwc->idx == -1)
+                       break;
+
+               /* constraint still honored */
+               if (!test_bit(hwc->idx, c->idxmsk))
+                       break;
+
+               /* not already used */
+               if (test_bit(hwc->idx, used_mask))
+                       break;
+
+               __set_bit(hwc->idx, used_mask);
+               assign[i] = hwc->idx;
+       }
+       if (i == n)
+               return 0;
+
+       /* slow path */
+       ret = perf_assign_events(constraints, n, wmin, wmax, assign);
+       return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       int idx = event->hw.idx;
+
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+               return;
+
+       event->hw.state = 0;
+       box->events[idx] = event;
+       box->n_active++;
+       __set_bit(idx, box->active_mask);
+
+       local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+       uncore_enable_event(box, event);
+
+       if (box->n_active == 1) {
+               uncore_enable_box(box);
+               uncore_pmu_start_hrtimer(box);
+       }
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+               uncore_disable_event(box, event);
+               box->n_active--;
+               box->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+
+               if (box->n_active == 0) {
+                       uncore_disable_box(box);
+                       uncore_pmu_cancel_hrtimer(box);
+               }
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               uncore_perf_event_update(box, event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+       int assign[UNCORE_PMC_IDX_MAX];
+       int i, n, ret;
+
+       if (!box)
+               return -ENODEV;
+
+       ret = n = uncore_collect_events(box, event, false);
+       if (ret < 0)
+               return ret;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
+
+       ret = uncore_assign_events(box, assign, n);
+       if (ret)
+               return ret;
+
+       /* save events moving to new counters */
+       for (i = 0; i < box->n_events; i++) {
+               event = box->event_list[i];
+               hwc = &event->hw;
+
+               if (hwc->idx == assign[i] &&
+                       hwc->last_tag == box->tags[assign[i]])
+                       continue;
+               /*
+                * Ensure we don't accidentally enable a stopped
+                * counter simply because we rescheduled.
+                */
+               if (hwc->state & PERF_HES_STOPPED)
+                       hwc->state |= PERF_HES_ARCH;
+
+               uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+       }
+
+       /* reprogram moved events into new counters */
+       for (i = 0; i < n; i++) {
+               event = box->event_list[i];
+               hwc = &event->hw;
+
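+               /*
+                * An event that kept both its counter and its tag was
+                * never stopped above, so leave it running; everything
+                * else is reassigned and restarted below.
+                */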
+               if (hwc->idx != assign[i] ||
+                       hwc->last_tag != box->tags[assign[i]])
+                       uncore_assign_hw_event(box, event, assign[i]);
+               else if (i < box->n_events)
+                       continue;
+
+               if (hwc->state & PERF_HES_ARCH)
+                       continue;
+
+               uncore_pmu_event_start(event, 0);
+       }
+       box->n_events = n;
+
+       return 0;
+}
+
+static void uncore_pmu_event_del(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       int i;
+
+       uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+
+       for (i = 0; i < box->n_events; i++) {
+               if (event == box->event_list[i]) {
+                       while (++i < box->n_events)
+                               box->event_list[i - 1] = box->event_list[i];
+
+                       --box->n_events;
+                       break;
+               }
+       }
+
+       event->hw.idx = -1;
+       event->hw.last_tag = ~0ULL;
+}
+
+static void uncore_pmu_event_read(struct perf_event *event)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       uncore_perf_event_update(box, event);
+}
+
+/*
+ * Validation ensures that the group can be scheduled on the PMU as if
+ * it were the only group present.
+ */
+static int uncore_validate_group(struct intel_uncore_pmu *pmu,
+                               struct perf_event *event)
+{
+       struct perf_event *leader = event->group_leader;
+       struct intel_uncore_box *fake_box;
+       int assign[UNCORE_PMC_IDX_MAX];
+       int ret = -EINVAL, n;
+
+       fake_box = uncore_alloc_box(smp_processor_id());
+       if (!fake_box)
+               return -ENOMEM;
+
+       fake_box->pmu = pmu;
+       /*
+        * The event is not yet connected with its siblings, so we must
+        * first collect the existing siblings, then add the new event
+        * before we can simulate the scheduling.
+        */
+       n = uncore_collect_events(fake_box, leader, true);
+       if (n < 0)
+               goto out;
+
+       fake_box->n_events = n;
+       n = uncore_collect_events(fake_box, event, false);
+       if (n < 0)
+               goto out;
+
+       fake_box->n_events = n;
+
+       ret = uncore_assign_events(fake_box, assign, n);
+out:
+       kfree(fake_box);
+       return ret;
+}
+
+int uncore_pmu_event_init(struct perf_event *event)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       struct hw_perf_event *hwc = &event->hw;
+       int ret;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       pmu = uncore_event_to_pmu(event);
+       /* no device found for this pmu */
+       if (pmu->func_id < 0)
+               return -ENOENT;
+
+       /*
+        * The uncore PMU measures at all privilege levels all the time,
+        * so it doesn't make sense to specify any exclude bits.
+        */
+       if (event->attr.exclude_user || event->attr.exclude_kernel ||
+                       event->attr.exclude_hv || event->attr.exclude_idle)
+               return -EINVAL;
+
+       /* Sampling not supported yet */
+       if (hwc->sample_period)
+               return -EINVAL;
+
+       /*
+        * Place all uncore events for a particular physical package
+        * onto a single cpu
+        */
+       if (event->cpu < 0)
+               return -EINVAL;
+       box = uncore_pmu_to_box(pmu, event->cpu);
+       if (!box || box->cpu < 0)
+               return -EINVAL;
+       event->cpu = box->cpu;
+
+       if (event->attr.config == UNCORE_FIXED_EVENT) {
+               /* no fixed counter */
+               if (!pmu->type->fixed_ctl)
+                       return -EINVAL;
+               /*
+                * if there is only one fixed counter, only the first pmu
+                * can access the fixed counter
+                */
+               if (pmu->type->single_fixed && pmu->pmu_idx > 0)
+                       return -EINVAL;
+               hwc->config = ~0ULL;
+       } else {
+               hwc->config = event->attr.config & pmu->type->event_mask;
+       }
+
+       event->hw.idx = -1;
+       event->hw.last_tag = ~0ULL;
+
+       if (event->group_leader != event)
+               ret = uncore_validate_group(pmu, event);
+       else
+               ret = 0;
+
+       return ret;
+}
+
+static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
+{
+       int ret;
+
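+       /*
+        * perf_invalid_context makes this a system-wide PMU; the perf
+        * core rejects per-task events for it.
+        */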
+       pmu->pmu = (struct pmu) {
+               .attr_groups    = pmu->type->attr_groups,
+               .task_ctx_nr    = perf_invalid_context,
+               .event_init     = uncore_pmu_event_init,
+               .add            = uncore_pmu_event_add,
+               .del            = uncore_pmu_event_del,
+               .start          = uncore_pmu_event_start,
+               .stop           = uncore_pmu_event_stop,
+               .read           = uncore_pmu_event_read,
+       };
+
+       if (pmu->type->num_boxes == 1) {
+               if (strlen(pmu->type->name) > 0)
+                       sprintf(pmu->name, "uncore_%s", pmu->type->name);
+               else
+                       sprintf(pmu->name, "uncore");
+       } else {
+               sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
+                       pmu->pmu_idx);
+       }
+
+       ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
+       return ret;
+}
+
+static void __init uncore_type_exit(struct intel_uncore_type *type)
+{
+       int i;
+
+       for (i = 0; i < type->num_boxes; i++)
+               free_percpu(type->pmus[i].box);
+       kfree(type->pmus);
+       type->pmus = NULL;
+       kfree(type->attr_groups[1]);
+       type->attr_groups[1] = NULL;
+}
+
+static void uncore_types_exit(struct intel_uncore_type **types)
+{
+       int i;
+       for (i = 0; types[i]; i++)
+               uncore_type_exit(types[i]);
+}
+
+static int __init uncore_type_init(struct intel_uncore_type *type)
+{
+       struct intel_uncore_pmu *pmus;
+       struct attribute_group *events_group;
+       struct attribute **attrs;
+       int i, j;
+
+       pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
+       if (!pmus)
+               return -ENOMEM;
+
+       type->unconstrainted = (struct event_constraint)
+               __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
+                               0, type->num_counters, 0);
+
+       for (i = 0; i < type->num_boxes; i++) {
+               pmus[i].func_id = -1;
+               pmus[i].pmu_idx = i;
+               pmus[i].type = type;
+               INIT_LIST_HEAD(&pmus[i].box_list);
+               pmus[i].box = alloc_percpu(struct intel_uncore_box *);
+               if (!pmus[i].box)
+                       goto fail;
+       }
+
+       if (type->event_descs) {
+               i = 0;
+               while (type->event_descs[i].attr.attr.name)
+                       i++;
+
+               events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+                                       sizeof(*events_group), GFP_KERNEL);
+               if (!events_group)
+                       goto fail;
+
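+               /*
+                * The attribute pointer array lives in the same
+                * allocation, immediately after the group structure.
+                */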
+               attrs = (struct attribute **)(events_group + 1);
+               events_group->name = "events";
+               events_group->attrs = attrs;
+
+               for (j = 0; j < i; j++)
+                       attrs[j] = &type->event_descs[j].attr.attr;
+
+               type->attr_groups[1] = events_group;
+       }
+
+       type->pmus = pmus;
+       return 0;
+fail:
+       uncore_type_exit(type);
+       return -ENOMEM;
+}
+
+static int __init uncore_types_init(struct intel_uncore_type **types)
+{
+       int i, ret;
+
+       for (i = 0; types[i]; i++) {
+               ret = uncore_type_init(types[i]);
+               if (ret)
+                       goto fail;
+       }
+       return 0;
+fail:
+       while (--i >= 0)
+               uncore_type_exit(types[i]);
+       return ret;
+}
+
+static struct pci_driver *uncore_pci_driver;
+static bool pcidrv_registered;
+
+/*
+ * add a pci uncore device
+ */
+static int __devinit uncore_pci_add(struct intel_uncore_type *type,
+                                   struct pci_dev *pdev)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, phys_id;
+
+       phys_id = pcibus_to_physid[pdev->bus->number];
+       if (phys_id < 0)
+               return -ENODEV;
+
+       box = uncore_alloc_box(0);
+       if (!box)
+               return -ENOMEM;
+
+       /*
+        * For performance monitoring units with multiple boxes, each
+        * box has a distinct function ID.
+        */
+       for (i = 0; i < type->num_boxes; i++) {
+               pmu = &type->pmus[i];
+               if (pmu->func_id == pdev->devfn)
+                       break;
+               if (pmu->func_id < 0) {
+                       pmu->func_id = pdev->devfn;
+                       break;
+               }
+               pmu = NULL;
+       }
+
+       if (!pmu) {
+               kfree(box);
+               return -EINVAL;
+       }
+
+       box->phys_id = phys_id;
+       box->pci_dev = pdev;
+       box->pmu = pmu;
+       uncore_box_init(box);
+       pci_set_drvdata(pdev, box);
+
+       raw_spin_lock(&uncore_box_lock);
+       list_add_tail(&box->list, &pmu->box_list);
+       raw_spin_unlock(&uncore_box_lock);
+
+       return 0;
+}
+
+static void uncore_pci_remove(struct pci_dev *pdev)
+{
+       struct intel_uncore_box *box = pci_get_drvdata(pdev);
+       struct intel_uncore_pmu *pmu = box->pmu;
+       int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+
+       if (WARN_ON_ONCE(phys_id != box->phys_id))
+               return;
+
+       raw_spin_lock(&uncore_box_lock);
+       list_del(&box->list);
+       raw_spin_unlock(&uncore_box_lock);
+
+       for_each_possible_cpu(cpu) {
+               if (*per_cpu_ptr(pmu->box, cpu) == box) {
+                       *per_cpu_ptr(pmu->box, cpu) = NULL;
+                       atomic_dec(&box->refcnt);
+               }
+       }
+
+       WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+       kfree(box);
+}
+
+static int __devinit uncore_pci_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *id)
+{
+       struct intel_uncore_type *type;
+
+       type = (struct intel_uncore_type *)id->driver_data;
+       return uncore_pci_add(type, pdev);
+}
+
+static int __init uncore_pci_init(void)
+{
+       int ret;
+
+       switch (boot_cpu_data.x86_model) {
+       case 45: /* Sandy Bridge-EP */
+               pci_uncores = snbep_pci_uncores;
+               uncore_pci_driver = &snbep_uncore_pci_driver;
+               snbep_pci2phy_map_init();
+               break;
+       default:
+               return 0;
+       }
+
+       ret = uncore_types_init(pci_uncores);
+       if (ret)
+               return ret;
+
+       uncore_pci_driver->probe = uncore_pci_probe;
+       uncore_pci_driver->remove = uncore_pci_remove;
+
+       ret = pci_register_driver(uncore_pci_driver);
+       if (ret == 0)
+               pcidrv_registered = true;
+       else
+               uncore_types_exit(pci_uncores);
+
+       return ret;
+}
+
+static void __init uncore_pci_exit(void)
+{
+       if (pcidrv_registered) {
+               pcidrv_registered = false;
+               pci_unregister_driver(uncore_pci_driver);
+               uncore_types_exit(pci_uncores);
+       }
+}
+
+static void __cpuinit uncore_cpu_dying(int cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       box = *per_cpu_ptr(pmu->box, cpu);
+                       *per_cpu_ptr(pmu->box, cpu) = NULL;
+                       if (box && atomic_dec_and_test(&box->refcnt))
+                               kfree(box);
+               }
+       }
+}
+
+static int __cpuinit uncore_cpu_starting(int cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box, *exist;
+       int i, j, k, phys_id;
+
+       phys_id = topology_physical_package_id(cpu);
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       box = *per_cpu_ptr(pmu->box, cpu);
+                       /* pre-allocated by uncore_cpu_prepare() from uncore_cpu_init()? */
+                       if (box && box->phys_id >= 0) {
+                               uncore_box_init(box);
+                               continue;
+                       }
+
+                       for_each_online_cpu(k) {
+                               exist = *per_cpu_ptr(pmu->box, k);
+                               if (exist && exist->phys_id == phys_id) {
+                                       atomic_inc(&exist->refcnt);
+                                       *per_cpu_ptr(pmu->box, cpu) = exist;
+                                       kfree(box);
+                                       box = NULL;
+                                       break;
+                               }
+                       }
+
+                       if (box) {
+                               box->phys_id = phys_id;
+                               uncore_box_init(box);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       if (pmu->func_id < 0)
+                               pmu->func_id = j;
+
+                       box = uncore_alloc_box(cpu);
+                       if (!box)
+                               return -ENOMEM;
+
+                       box->pmu = pmu;
+                       box->phys_id = phys_id;
+                       *per_cpu_ptr(pmu->box, cpu) = box;
+               }
+       }
+       return 0;
+}
+
+static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
+                                           int old_cpu, int new_cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; uncores[i]; i++) {
+               type = uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       if (old_cpu < 0)
+                               box = uncore_pmu_to_box(pmu, new_cpu);
+                       else
+                               box = uncore_pmu_to_box(pmu, old_cpu);
+                       if (!box)
+                               continue;
+
+                       if (old_cpu < 0) {
+                               WARN_ON_ONCE(box->cpu != -1);
+                               box->cpu = new_cpu;
+                               continue;
+                       }
+
+                       WARN_ON_ONCE(box->cpu != old_cpu);
+                       if (new_cpu >= 0) {
+                               uncore_pmu_cancel_hrtimer(box);
+                               perf_pmu_migrate_context(&pmu->pmu,
+                                               old_cpu, new_cpu);
+                               box->cpu = new_cpu;
+                       } else {
+                               box->cpu = -1;
+                       }
+               }
+       }
+}
+
+static void __cpuinit uncore_event_exit_cpu(int cpu)
+{
+       int i, phys_id, target;
+
+       /* nothing to do unless the exiting cpu was collecting uncore events */
+       if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+               return;
+
+       /* find a new cpu to collect uncore events */
+       phys_id = topology_physical_package_id(cpu);
+       target = -1;
+       for_each_online_cpu(i) {
+               if (i == cpu)
+                       continue;
+               if (phys_id == topology_physical_package_id(i)) {
+                       target = i;
+                       break;
+               }
+       }
+
+       /* migrate uncore events to the new cpu */
+       if (target >= 0)
+               cpumask_set_cpu(target, &uncore_cpu_mask);
+
+       uncore_change_context(msr_uncores, cpu, target);
+       uncore_change_context(pci_uncores, cpu, target);
+}
+
+static void __cpuinit uncore_event_init_cpu(int cpu)
+{
+       int i, phys_id;
+
+       phys_id = topology_physical_package_id(cpu);
+       for_each_cpu(i, &uncore_cpu_mask) {
+               if (phys_id == topology_physical_package_id(i))
+                       return;
+       }
+
+       cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+       uncore_change_context(msr_uncores, -1, cpu);
+       uncore_change_context(pci_uncores, -1, cpu);
+}
+
+static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
+                                        unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (long)hcpu;
+
+       /* allocate/free data structure for uncore box */
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+               uncore_cpu_prepare(cpu, -1);
+               break;
+       case CPU_STARTING:
+               uncore_cpu_starting(cpu);
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_DYING:
+               uncore_cpu_dying(cpu);
+               break;
+       default:
+               break;
+       }
+
+       /* select the cpu that collects uncore events */
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DOWN_FAILED:
+       case CPU_STARTING:
+               uncore_event_init_cpu(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               uncore_event_exit_cpu(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+       .notifier_call = uncore_cpu_notifier,
+       /*
+        * to migrate uncore events, our notifier should be executed
+        * before perf core's notifier.
+        */
+       .priority = CPU_PRI_PERF + 1,
+};
+
+static void __init uncore_cpu_setup(void *dummy)
+{
+       uncore_cpu_starting(smp_processor_id());
+}
+
+static int __init uncore_cpu_init(void)
+{
+       int ret, cpu;
+
+       switch (boot_cpu_data.x86_model) {
+       case 26: /* Nehalem */
+       case 30:
+       case 37: /* Westmere */
+       case 44:
+               msr_uncores = nhm_msr_uncores;
+               break;
+       case 42: /* Sandy Bridge */
+               msr_uncores = snb_msr_uncores;
+               break;
+       case 45: /* Sandy Bridge-EP */
+               msr_uncores = snbep_msr_uncores;
+               break;
+       default:
+               return 0;
+       }
+
+       ret = uncore_types_init(msr_uncores);
+       if (ret)
+               return ret;
+
+       get_online_cpus();
+
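+       /*
+        * Pick the first cpu of each package as the collector and
+        * allocate its boxes; uncore_cpu_setup() below then shares and
+        * initializes the boxes on every cpu.
+        */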
+       for_each_online_cpu(cpu) {
+               int i, phys_id = topology_physical_package_id(cpu);
+
+               for_each_cpu(i, &uncore_cpu_mask) {
+                       if (phys_id == topology_physical_package_id(i)) {
+                               phys_id = -1;
+                               break;
+                       }
+               }
+               if (phys_id < 0)
+                       continue;
+
+               uncore_cpu_prepare(cpu, phys_id);
+               uncore_event_init_cpu(cpu);
+       }
+       on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+       register_cpu_notifier(&uncore_cpu_nb);
+
+       put_online_cpus();
+
+       return 0;
+}
+
+static int __init uncore_pmus_register(void)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_type *type;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       uncore_pmu_register(pmu);
+               }
+       }
+
+       for (i = 0; pci_uncores[i]; i++) {
+               type = pci_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       uncore_pmu_register(pmu);
+               }
+       }
+
+       return 0;
+}
+
+static int __init intel_uncore_init(void)
+{
+       int ret;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
+       ret = uncore_pci_init();
+       if (ret)
+               goto fail;
+       ret = uncore_cpu_init();
+       if (ret) {
+               uncore_pci_exit();
+               goto fail;
+       }
+
+       uncore_pmus_register();
+       return 0;
+fail:
+       return ret;
+}
+device_initcall(intel_uncore_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
new file mode 100644
index 0000000..4d52db0
--- /dev/null
@@ -0,0 +1,406 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include "perf_event.h"
+
+#define UNCORE_PMU_NAME_LEN            32
+#define UNCORE_BOX_HASH_SIZE           8
+
+#define UNCORE_PMU_HRTIMER_INTERVAL    (60 * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT             0xffff
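+/*
+ * A fixed event is requested with attr.config == UNCORE_FIXED_EVENT
+ * and stored internally as hwc->config == ~0ULL.
+ */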
+#define UNCORE_PMC_IDX_MAX_GENERIC     8
+#define UNCORE_PMC_IDX_FIXED           UNCORE_PMC_IDX_MAX_GENERIC
+#define UNCORE_PMC_IDX_MAX             (UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
+
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK                 0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET                   (1 << 18)
+#define SNB_UNC_CTL_EN                         (1 << 22)
+#define SNB_UNC_CTL_INVERT                     (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK                 0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK                 0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN               (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK                 (SNB_UNC_CTL_EV_SEL_MASK | \
+                                                SNB_UNC_CTL_UMASK_MASK | \
+                                                SNB_UNC_CTL_EDGE_DET | \
+                                                SNB_UNC_CTL_INVERT | \
+                                                SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK                 (SNB_UNC_CTL_EV_SEL_MASK | \
+                                                SNB_UNC_CTL_UMASK_MASK | \
+                                                SNB_UNC_CTL_EDGE_DET | \
+                                                SNB_UNC_CTL_INVERT | \
+                                                NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
+#define SNB_UNC_FIXED_CTR_CTRL                  0x394
+#define SNB_UNC_FIXED_CTR                       0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
+#define SNB_UNC_CBO_0_PER_CTR0                  0x706
+#define SNB_UNC_CBO_MSR_OFFSET                  0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
+#define NHM_UNC_FIXED_CTR                       0x394
+#define NHM_UNC_FIXED_CTR_CTRL                  0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0                     0x3c0
+#define NHM_UNC_UNCORE_PMC0                     0x3b0
+
+/* SNB-EP Box level control */
+#define SNBEP_PMON_BOX_CTL_RST_CTRL    (1 << 0)
+#define SNBEP_PMON_BOX_CTL_RST_CTRS    (1 << 1)
+#define SNBEP_PMON_BOX_CTL_FRZ         (1 << 8)
+#define SNBEP_PMON_BOX_CTL_FRZ_EN      (1 << 16)
+#define SNBEP_PMON_BOX_CTL_INT         (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+                                        SNBEP_PMON_BOX_CTL_RST_CTRS | \
+                                        SNBEP_PMON_BOX_CTL_FRZ_EN)
+/* SNB-EP event control */
+#define SNBEP_PMON_CTL_EV_SEL_MASK     0x000000ff
+#define SNBEP_PMON_CTL_UMASK_MASK      0x0000ff00
+#define SNBEP_PMON_CTL_RST             (1 << 17)
+#define SNBEP_PMON_CTL_EDGE_DET                (1 << 18)
+#define SNBEP_PMON_CTL_EV_SEL_EXT      (1 << 21)       /* only for QPI */
+#define SNBEP_PMON_CTL_EN              (1 << 22)
+#define SNBEP_PMON_CTL_INVERT          (1 << 23)
+#define SNBEP_PMON_CTL_TRESH_MASK      0xff000000
+#define SNBEP_PMON_RAW_EVENT_MASK      (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                        SNBEP_PMON_CTL_UMASK_MASK | \
+                                        SNBEP_PMON_CTL_EDGE_DET | \
+                                        SNBEP_PMON_CTL_INVERT | \
+                                        SNBEP_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP Ubox event control */
+#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK                0x1f000000
+#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK                \
+                               (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                SNBEP_PMON_CTL_UMASK_MASK | \
+                                SNBEP_PMON_CTL_EDGE_DET | \
+                                SNBEP_PMON_CTL_INVERT | \
+                                SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP PCU event control */
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK    0x0000c000
+#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK      0x1f000000
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT      (1 << 30)
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET    (1 << 31)
+#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK      \
+                               (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+                                SNBEP_PMON_CTL_EDGE_DET | \
+                                SNBEP_PMON_CTL_INVERT | \
+                                SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+
+/* SNB-EP pci control register */
+#define SNBEP_PCI_PMON_BOX_CTL                 0xf4
+#define SNBEP_PCI_PMON_CTL0                    0xd8
+/* SNB-EP pci counter register */
+#define SNBEP_PCI_PMON_CTR0                    0xa0
+
+/* SNB-EP home agent register */
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0       0x40
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1       0x44
+#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH      0x48
+/* SNB-EP memory controller register */
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL                0xf0
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR                0xd0
+/* SNB-EP QPI register */
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0         0x228
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1         0x22c
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0          0x238
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1          0x23c
+
+/* SNB-EP Ubox register */
+#define SNBEP_U_MSR_PMON_CTR0                  0xc16
+#define SNBEP_U_MSR_PMON_CTL0                  0xc10
+
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL                0xc08
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR                0xc09
+
+/* SNB-EP Cbo register */
+#define SNBEP_C0_MSR_PMON_CTR0                 0xd16
+#define SNBEP_C0_MSR_PMON_CTL0                 0xd10
+#define SNBEP_C0_MSR_PMON_BOX_FILTER           0xd14
+#define SNBEP_C0_MSR_PMON_BOX_CTL              0xd04
+#define SNBEP_CBO_MSR_OFFSET                   0x20
+
+/* SNB-EP PCU register */
+#define SNBEP_PCU_MSR_PMON_CTR0                        0xc36
+#define SNBEP_PCU_MSR_PMON_CTL0                        0xc30
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER          0xc34
+#define SNBEP_PCU_MSR_PMON_BOX_CTL             0xc24
+#define SNBEP_PCU_MSR_CORE_C3_CTR              0x3fc
+#define SNBEP_PCU_MSR_CORE_C6_CTR              0x3fd
+
+struct intel_uncore_ops;
+struct intel_uncore_pmu;
+struct intel_uncore_box;
+struct uncore_event_desc;
+
+struct intel_uncore_type {
+       const char *name;
+       int num_counters;
+       int num_boxes;
+       int perf_ctr_bits;
+       int fixed_ctr_bits;
+       int single_fixed;
+       unsigned perf_ctr;
+       unsigned event_ctl;
+       unsigned event_mask;
+       unsigned fixed_ctr;
+       unsigned fixed_ctl;
+       unsigned box_ctl;
+       unsigned msr_offset;
+       struct event_constraint unconstrainted;
+       struct event_constraint *constraints;
+       struct intel_uncore_pmu *pmus;
+       struct intel_uncore_ops *ops;
+       struct uncore_event_desc *event_descs;
+       const struct attribute_group *attr_groups[3];
+};
+
+#define format_group attr_groups[0]
+
+struct intel_uncore_ops {
+       void (*init_box)(struct intel_uncore_box *);
+       void (*disable_box)(struct intel_uncore_box *);
+       void (*enable_box)(struct intel_uncore_box *);
+       void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
+       void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
+       u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
+};
+
+struct intel_uncore_pmu {
+       struct pmu pmu;
+       char name[UNCORE_PMU_NAME_LEN];
+       int pmu_idx;
+       int func_id;
+       struct intel_uncore_type *type;
+       struct intel_uncore_box ** __percpu box;
+       struct list_head box_list;
+};
+
+struct intel_uncore_box {
+       int phys_id;
+       int n_active;   /* number of active events */
+       int n_events;
+       int cpu;        /* cpu to collect events */
+       unsigned long flags;
+       atomic_t refcnt;
+       struct perf_event *events[UNCORE_PMC_IDX_MAX];
+       struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+       unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+       u64 tags[UNCORE_PMC_IDX_MAX];
+       struct pci_dev *pci_dev;
+       struct intel_uncore_pmu *pmu;
+       struct hrtimer hrtimer;
+       struct list_head list;
+};
+
+#define UNCORE_BOX_FLAG_INITIATED      0
+
+struct uncore_event_desc {
+       struct kobj_attribute attr;
+       const char *config;
+};
+
+#define INTEL_UNCORE_EVENT_DESC(_name, _config)                        \
+{                                                              \
+       .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
+       .config = _config,                                      \
+}
+
+#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
+static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
+                               struct kobj_attribute *attr,            \
+                               char *page)                             \
+{                                                                      \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
+       return sprintf(page, _format "\n");                             \
+}                                                                      \
+static struct kobj_attribute format_attr_##_var =                      \
+       __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
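+/*
+ * Example: the cmask5 attribute used by the SNB code would be created
+ * with something like
+ *   DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+ * matching SNB_UNC_CTL_CMASK_MASK (bits 24-28 of the event control).
+ */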
+
+static ssize_t uncore_event_show(struct kobject *kobj,
+                               struct kobj_attribute *attr, char *buf)
+{
+       struct uncore_event_desc *event =
+               container_of(attr, struct uncore_event_desc, attr);
+       return sprintf(buf, "%s", event->config);
+}
+
+static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
+{
+       return box->pmu->type->box_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr;
+}
+
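+/*
+ * PCI PMON register layout: event control registers are one dword
+ * apart (stride 4) and counters two dwords apart (stride 8, each
+ * 48-bit counter spanning two dwords).
+ */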
+static inline
+unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       return idx * 4 + box->pmu->type->event_ctl;
+}
+
+static inline
+unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       return idx * 8 + box->pmu->type->perf_ctr;
+}
+
+static inline
+unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+{
+       if (!box->pmu->type->box_ctl)
+               return 0;
+       return box->pmu->type->box_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+{
+       if (!box->pmu->type->fixed_ctl)
+               return 0;
+       return box->pmu->type->fixed_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
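+/*
+ * MSR-based boxes place their event-select and counter MSRs at
+ * consecutive addresses, so the raw index is added directly;
+ * msr_offset then steps between the boxes of one type.
+ */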
+static inline
+unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       return idx + box->pmu->type->event_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       return idx + box->pmu->type->perf_ctr +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
+{
+       if (box->pci_dev)
+               return uncore_pci_fixed_ctl(box);
+       else
+               return uncore_msr_fixed_ctl(box);
+}
+
+static inline
+unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
+{
+       if (box->pci_dev)
+               return uncore_pci_fixed_ctr(box);
+       else
+               return uncore_msr_fixed_ctr(box);
+}
+
+static inline
+unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       if (box->pci_dev)
+               return uncore_pci_event_ctl(box, idx);
+       else
+               return uncore_msr_event_ctl(box, idx);
+}
+
+static inline
+unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       if (box->pci_dev)
+               return uncore_pci_perf_ctr(box, idx);
+       else
+               return uncore_msr_perf_ctr(box, idx);
+}
+
+static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
+{
+       return box->pmu->type->perf_ctr_bits;
+}
+
+static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr_bits;
+}
+
+static inline int uncore_num_counters(struct intel_uncore_box *box)
+{
+       return box->pmu->type->num_counters;
+}
+
+static inline void uncore_disable_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->type->ops->disable_box)
+               box->pmu->type->ops->disable_box(box);
+}
+
+static inline void uncore_enable_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->type->ops->enable_box)
+               box->pmu->type->ops->enable_box(box);
+}
+
+static inline void uncore_disable_event(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       box->pmu->type->ops->disable_event(box, event);
+}
+
+static inline void uncore_enable_event(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       box->pmu->type->ops->enable_event(box, event);
+}
+
+static inline u64 uncore_read_counter(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       return box->pmu->type->ops->read_counter(box, event);
+}
+
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
index 47124a73dd73098e1179f174a0a6cb56ea07f227..6c82e4037989a8dc2aa78368cf998a8b21f4cf1f 100644
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
         * So at moment let leave metrics turned on forever -- it's
         * ok for now but need to be revisited!
         *
-        * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
-        * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+        * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+        * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
         */
 }
 
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise interrupt get
         * asserted again and again
         */
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(hwc->config_base,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
        bind = &p4_pebs_bind_map[idx];
 
-       (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE,     (u64)bind->metric_pebs);
-       (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT,  (u64)bind->metric_vert);
+       (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+       (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT,      (u64)bind->metric_vert);
 }
 
 static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
         */
        p4_pmu_enable_pebs(hwc->config);
 
-       (void)checking_wrmsrl(escr_addr, escr_conf);
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(escr_addr, escr_conf);
+       (void)wrmsrl_safe(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
index 32bcfc7dd2300d2043245e22a2351dfa65615484..e4dd0f7a04535f09b83d47d95868bcc1c152dff3 100644
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7"    );
index 9ce885996fd718bc80b5f0bf52efec81e27b3b2d..17fff18a103104431c0e10e1543af6f027f64f72 100644
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
-       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = native_write_msr_safe,
-       .wrmsr_regs = native_wrmsr_safe_regs,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        .read_tscp = native_read_tscp,
index 61cdf7fdf09961757d7a6f8ba595df63a5a123b3..3e215ba687663745242be23b55f766a5c8d98107 100644 (file)
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
-                               ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
-                               ret = checking_wrmsrl(MSR_FS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
index dc4e910a7d964c76dd4935c2986c33e37b1a595b..36fd42091fa7283d71e189ddb55cf29a5e940790 100644 (file)
@@ -409,9 +409,10 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
  * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
  * @arch_uprobe: the probepoint information.
+ * @addr: virtual address at which to install the probepoint
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
 {
        int ret;
        struct insn insn;
index a311cc59b65d9769e05af4e566f6c997b3139bc4..8d6ef78b5d01c3a180959bd54f7da26ddf325188 100644 (file)
@@ -1,5 +1,5 @@
 #include <linux/module.h>
 #include <asm/msr.h>
 
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
index 69fa10623f2181b23740bdf0a4ad5f7919120809..f6d13eefad1063d5e525dbc0cfff1ccc12b34bc9 100644 (file)
@@ -6,13 +6,13 @@
 
 #ifdef CONFIG_X86_64
 /*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
  *
  * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
  *
  */
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
        pushq_cfi %rbx
        pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
 
        _ASM_EXTABLE(1b, 3b)
        CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #else /* X86_32 */
 
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
        pushl_cfi %ebx
        pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
 
        _ASM_EXTABLE(1b, 3b)
        CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #endif
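
Dropping the native_ prefix works because the paravirt indirection for these entry points went away (see the pv_cpu_ops hunks above and below): the assembly stubs are now called directly. Their calling convention packs all eight GPRs into one u32 gprs[8] array ordered eax, ecx, edx, ebx, esp, ebp, esi, edi; ecx carries the MSR number in, and eax/edx carry the 64-bit value. A C-side sketch of a read through the renamed entry point (read_msr_regs is a hypothetical helper, not part of this patch):

	/* Sketch only: index 1 (ecx) selects the MSR, indexes 0 (eax)
	 * and 2 (edx) return the low/high halves of the value. */
	static int read_msr_regs(u32 msr, u64 *val)
	{
		u32 gprs[8] = { 0 };
		int err;

		gprs[1] = msr;			/* ecx = MSR number */
		err = rdmsr_safe_regs(gprs);
		if (!err)
			*val = ((u64)gprs[2] << 32) | gprs[0];	/* edx:eax */
		return err;
	}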
index 66e6d93598262a2877666f1d1163125207f9c89a..0faad646f5fda8eb40c64a95e685c2c919daadf6 100644 (file)
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
 {
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
-       checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-       checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
index ff962d4b821e5162415fa06ddf75e8c57d498b51..ed7d54985d0cb558b6e4177d2891a9609fa3e1e6 100644 (file)
@@ -1124,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .wbinvd = native_wbinvd,
 
        .read_msr = native_read_msr_safe,
-       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = xen_write_msr_safe,
-       .wrmsr_regs = native_wrmsr_safe_regs,
 
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
index 02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df..e7dee617358e810aec57ff25162ff95604fabb3c 100644 (file)
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
index 3c923a7aeb56f1658142b091868c3c29ebffd3c5..93eb3e4f88ce78affc79c5ca6d8b7c7b0759be5a 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With the queue marked dead, any woken-up waiter will fail the
+        * allocation path, so the wakeup chaining is lost and we're
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               spin_lock_irq(q->queue_lock);
+               for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+                       wake_up_all(&q->rq.wait[i]);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 780354888958cd7ea03aef2c1654b325bc9fb24e..6e4744cbfb56b4ca0d99062a0d9b0437c894016d 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 673c977cc2bfa238e0fe0efa6dddac5193fbccd8..fb52df9744f5fe411908162fbb02257ca0bc097a 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
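
Two things happen in this restructuring: blkcg_policy_cfq now only exists (and is only registered/unregistered) under CONFIG_CFQ_GROUP_IOSCHED, and cfq_init() keeps the usual kernel goto-unwind shape, where each failure label undoes exactly the steps that already succeeded, in reverse order. Note the explicit ret = -ENOMEM before the KMEM_CACHE() step: cache creation returns a pointer rather than an errno, so the error code has to be preset. The shape in miniature, assuming hypothetical step names:

	static int __init my_init(void)
	{
		int ret;

		ret = register_policy();	/* hypothetical step 1 */
		if (ret)
			return ret;

		ret = -ENOMEM;			/* next step returns a pointer */
		if (!create_cache())		/* hypothetical step 2 */
			goto err_unreg;

		ret = register_elevator();	/* hypothetical step 3 */
		if (ret)
			goto err_free;

		return 0;

	err_free:
		destroy_cache();
	err_unreg:
		unregister_policy();
		return ret;
	}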
index 260fa80ef5750f80f4ebc1415d06abc992e18b8a..9a87daa6f4fbd10202ea9d47ae1a549c806a6fb4 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5..fcb956bb4b4c525875f961e7c2398b20c35723f4 100644 (file)
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1 (or multiples thereof).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
index 9c5c84946b056792fa45d99051bd5c28750db7e1..8e93a6ac9bb65e3497f53c92723a67429ffdf946 100644 (file)
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
                __drbd_chk_io_error(mdev, false);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we cannot reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
index cce7df367b793d111ef875bd5cd30a6f5e08782c..553f43a90953cab40f421658ecb37fa9c20c8c54 100644 (file)
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index 264bc77dcb911c7030c787d3ad87cd79b654ce81..a8fddeb3d638ebcdf9a0191beda816d8f1338ccd 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "Hardware\n--------\n");
-       size += sprintf(&buf[size], "S ACTive      : [ 0x");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Command Issue : [ 0x");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Completed     : [ 0x");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                readl(dd->port->completed[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->mmio + HOST_IRQ_STAT));
        size += sprintf(&buf[size], "\n");
 
-       size += sprintf(&buf[size], "Local\n-----\n");
-       size += sprintf(&buf[size], "Allocated    : [ 0x");
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Commands in Q: [ 0x");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
-
-       return size;
-}
+       if (!len || size)
+               return 0;
 
-static ssize_t mtip_hw_show_flags(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       if (size < 0)
+               return -EINVAL;
 
-       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
                                                        dd->port->flags);
-       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
                                                        dd->dd_flag);
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
-       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
-       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 
index b2c88da26b2a7b7f94ff77b6f1a1d2047f6b45f3..f51fc23d17bb0e0025c74ac9f495007dcbc8b784 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in PCI configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
 enum {
@@ -447,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
index aa2712060bfbcd153e446f5adcf80a9d61377e75..9a72277a31df0cb131f879bb8a1503a65a57b7cc 100644 (file)
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
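
mm_check_plugged() is an instance of the blk_plug callback pattern: if the submitting task currently holds a plug, register at most one per-card callback on it and defer kicking the hardware until the plug is flushed; if no plug is held (or the bio is REQ_SYNC), kick immediately. The key idiom is embedding the generic blk_plug_cb in a driver wrapper and recovering the wrapper with container_of() at unplug time. Sketched with hypothetical names:

	struct my_plug_cb {
		struct blk_plug_cb cb;	/* linked on current->plug->cb_list */
		struct my_device *dev;	/* hypothetical driver context */
	};

	static void my_unplug(struct blk_plug_cb *cb)
	{
		struct my_plug_cb *mycb = container_of(cb, struct my_plug_cb, cb);

		ring_doorbell(mycb->dev);	/* hypothetical: start the I/O */
		kfree(mycb);			/* callbacks are one-shot */
	}

Note the GFP_ATOMIC allocation in mm_check_plugged(): it runs under card->lock, and returning 0 on allocation failure simply falls back to the immediate-kick path.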
index 773cf27dc23fc595d6fca48ea8ec6c4992438d18..9ad3b5ec1dc1c521085db47a7928cc8cf1179701 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4bdd2e4528ae3c3b8871cd65f85d3c34952..e4fb3374dcd2aaa6d0f834cd9394564a3c022a38 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist.
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
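
The blkif_interrupt() changes treat the backend as untrusted: a response id is range-checked against BLK_RING_SIZE before it can index the shadow array, and add_id_to_freelist() now verifies that the slot is actually in flight before recycling it, warning and skipping the response rather than corrupting the freelist. The same shape as a standalone sketch (struct my_info and lookup_shadow are hypothetical):

	struct my_shadow {
		struct request *request;	/* NULL when slot is free */
	};

	struct my_info {
		struct my_shadow shadow[BLK_RING_SIZE];
	};

	/* Validate an index received from an untrusted peer before use. */
	static struct request *lookup_shadow(struct my_info *info,
					     unsigned long id)
	{
		if (id >= BLK_RING_SIZE)	/* never handed out by us */
			return NULL;
		if (!info->shadow[id].request)	/* slot not in flight */
			return NULL;
		return info->shadow[id].request;
	}

op_name() follows the same defensive style for the log messages: out-of-range opcodes map to "unknown" and holes in the designated-initializer table map to "reserved".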
index dcbe05616090de6d5a830cc8a5950fb282820eee..9a1eb0cfa95f3f0a00a7334f3b276bf623d88cf6 100644 (file)
@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       if (clk->parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (clk->parents[i] == parent)
-                               break;
-       else
+       if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                                                GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
-                               if (clk->parents)
-                                       clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
+                               clk->parents[i] = __clk_lookup(parent->name);
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
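
The removed code in __clk_set_parent() had a classic dangling-else bug: an else always binds to the nearest unmatched if, so despite the indentation the kzalloc() fallback was attached to the innermost if inside the loop, not to if (clk->parents). The rewrite hoists the allocation out and folds both lookups into one properly braced loop. The bug in miniature:

	if (a)
		for (i = 0; i < n; i++)
			if (match(i))
				break;
	else			/* looks like it pairs with 'if (a)'... */
		do_fallback();	/* ...but actually pairs with 'if (match(i))',
				 * so it runs on every non-matching iteration */

Braces around the loop body would have left no inner if for the else to capture; gcc's -Wparentheses warns about exactly this ambiguity.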
index 5873e481e5d2dadde3c16e0e0883d8e6e3340dbe..a8743c399e83234c976ebdb4b471542a0645c42d 100644 (file)
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
index f94792626b94fadae47303f150e7aff278d371ef..36822b924eb12974f7c73af8fe8368707ab08abf 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index 59d44937dd9fcc9f1c4350bc590950d5be75175f..84b648a7ddd8cf697fc07fd00a1e2eed39ff46bd 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow them as needed.  Once we switch
+        * to two level pts we can unify this again.
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
index f28bd4b7ef980937c88eb54c30b5534adc5abf3b..21ec9f5653cedc94e729e521e8cf62e4b14c2884 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
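
Both ioctls previously dereferenced robj->rdev after drm_gem_object_unreference_unlocked(), i.e. after dropping the reference that kept robj alive, which is a potential use-after-free if that was the last reference. The fix caches the device pointer from dev->dev_private up front, so nothing needs the GEM object past the unreference. The general shape (names hypothetical; the cached pointer must have its own lifetime guarantee, here the device outliving the ioctl):

	struct my_dev *mdev = obj->mdev;	/* hoisted while obj is pinned */

	do_work(obj);
	put_object(obj);		/* obj may be freed right here */
	finish_up(mdev);		/* safe: uses the cached pointer */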
 
index c7b61f16ecfd79855315b5ad938499510a2d8b41..0b0279291a73e79109dd95db5ceee4823be1da66 100644 (file)
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
index 37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba..ce59824fb41401963113a17af05ac660954bbef2 100644 (file)
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
        if (r)
                return r;
 
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
        r = dm_pool_reserve_metadata_snap(pool->pmd);
        if (r)
                DMWARN("reserve_metadata_snap message failed.");
index 1c2f9048e1ae010864c4d2478c05f9a510c729e1..a4c219e3c859624925125e54029aac2da6211674 100644 (file)
@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just in case the thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
index 9339e67fcc79a9f961d016d3f80291ce012323bb..61a1833ebaf33ec40afaf752db9a849439c9bd13 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53bf4aa2b1efe1136c2520067ece16bb9b35..fc90c11620adb026c9842d6e95497248d02556e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
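
The checker constructors switch from returning NULL on every failure to the ERR_PTR convention, encoding the real -errno in the pointer so callers can distinguish -EINVAL from -ENOMEM and propagate it; the dm_sm_disk_create() and dm_tm_create_internal() hunks below now test with IS_ERR() and decode with PTR_ERR() accordingly. The round trip in miniature, assuming a hypothetical struct thing:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct thing {
		int x;
	};

	static struct thing *my_create(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return ERR_PTR(-ENOMEM);	/* errno in the pointer */
		return t;
	}

	static int my_caller(void)
	{
		struct thing *t = my_create();

		if (IS_ERR(t))
			return PTR_ERR(t);		/* decode it back */
		/* ... use t ... */
		kfree(t);
		return 0;
	}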
index fc469ba9f6277a7c701615a1b3d788a05fa52559..3d0ed53328831627ab6ec79cb85946f36b0fe7a4 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 400fe144c0cd1c94dd5c325c5ed690a425b7ece3..e5604b32d91ff0017a5a625cfd8106fca92bfe19 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index a9c7981ddd245c6955cbed390b617a9172abde31..8c2754f835ef89a99d247719d2d114f6fba30b13 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets its own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 99ae6068e456992ef2b16c98e01d5c2ee0362128..8da6282254c3e822a27702c469b9afce720bda43 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recorded in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
index d26767246d26ad1d2bb9a2da371ab1bbbdf130fc..04348d76bb30fa8831964ea980ec2df912a45f92 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
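
A pattern worth noting in these raid5 hunks: md_wait_for_blocked_rdev() consumes a reference (it calls rdev_dec_pending() when it finishes), so ops_run_io() must take a reference immediately before handing the rdev over, and the non-external branch in handle_stripe() that declines to wait must drop that reference itself. Condensed:

	/* md_wait_for_blocked_rdev() drops one nr_pending ref on return ... */
	atomic_inc(&rdev->nr_pending);			/* ... so take it first */
	md_wait_for_blocked_rdev(rdev, conf->mddev);

	/* Choosing not to wait means dropping the same reference ourselves. */
	rdev_dec_pending(rdev, conf->mddev);
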
index 0741aded9eb057bbc2454a220bd6a19318e975fa..f2db8fca46a13d8b431d977df4efee8a61bc0959 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
index 351a4097b2baec09c53ce45cace4c03c1c8dcb47..76edbc1be33b4d1151e72c24c1f621c830c3a039 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index 31d37a2b5ba818e1366945b630f8b187ec9fb15b..623e30b9964de29e091f77564ccd53ce8ba30876 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
index 8ce67064b9c5802efe098f702486477b424c273e..90eef07943f4d50bbd027646e170abca5abfb1a3 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                * The user's desire is to turn off interrupt throttling
+                * altogether, but due to HW limitations, we can't do that.
+                * Instead we set a very small value in EITR, which would
+                * allow ~967k interrupts per second, but allow the adapter's
+                * internal clocking to still function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
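
The conversion used throughout this function is requested_itr = 10^9 / (current_itr * 256): the EITR value counts the interrupt gap in 256 ns units, so the quotient is interrupts per second. Plugging in the two interesting cases (usecs << 2 gives roughly 4 EITR units per microsecond, since 4 * 256 ns ≈ 1 us):

	usecs = 10  ->  current_itr = 40  ->  10^9 / (40 * 256)  ≈  97,656 irq/s
	floor of 4  ->                        10^9 / (4 * 256)   ≈ 976,562 irq/s   (the cap the comment calls "~967k")
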
index d614c374ed9d2ada5b6923e288528b8201a09807..3b5c4571b55e3c922a4b6e5a94a0a4a8adcb2fce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 3767a122586043512cd6ded88b562fd9b97df88f..b01960fcfbc91110ede59a5e18f9fac94a912bc3 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
index c54b7d37bff159f1fc8a7482a4d8bdfe806b9beb..420d69b2674ceae1282fcc9d8cbbc38d8c5d00af 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index 1c68e564f503197c9642ba1cae5e9633325fbe8c..995ca8e1302efc7562ff9905d8e91d00ffb25f74 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
index e1fcc68124dc3bc78359cf72e2da459ddf9c4560..0735aeb3b26cefcd15c2847a1775718d36cecbc6 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
index 0e81904956cf1c04fa37ad01e65282b55e2bf8f9..5c54aa43ca2d81b3936d917aeb68f7331aabb34b 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
index 3ee23134c02b9b96ee184a5999e9577bf6452511..013680332f075be85a9035bcec33eaec0c2e9285 100644 (file)
@@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
index 9c44088054dd90bd967626d8416e63c083225fc8..900ee129e825987dfb2dbe6b485bfa0bb40dbc74 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffebabc60a6ab1059276664bc3f3f71e129bb..6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
index ceb82cd749cc7ba1ca2054ae0b675802d2194190..383820a52beba0d62428614131adbe42def1e5db 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e0377473282f05fab01835578cb9b2579a40217f..fc8a9bfa1248305fababa37b59ca90110a57afaa 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3dcd237c5aada48223e224a7f943ef2692..11e731f3581c2dbf4856d8676ca6ee66c12ccabe 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index 49ebf20c56ebc0ac98dc8d5b25bc3a1164847020..22a5916564b84099576e1c554b37d1fcbcd420b2 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
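
The new exit_restore_skb label relies on skb_push() being the exact inverse of skb_pull(): earlier in this function the interface header is consumed with skb_pull(skb, INTF_HEADER_LEN), so pushing the same length back returns the skb to its original state before it is handed back for reuse. The pairing:

	skb_pull(skb, INTF_HEADER_LEN);		/* consume header: data += n, len -= n */
	/* ... on paths where the caller will recycle this skb ... */
	skb_push(skb, INTF_HEADER_LEN);		/* restore header: data -= n, len += n */
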
index f3fc6551585780b08724bb8112457caafeef220e..3fa4d417699381225e853a56238e0d8506a2f99b 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index d228358e6a403b9c8eee56d63ce4991058975dc9..9970c2b1b19979dc3577472c7c21e27703a38a76 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index 54156b0b5c2d7defe6b42791d022d4c982fd945f..d7b907e67170348e70302e6a1cfe70642a8c6f7e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index c9f1318a3b820b363526576036c4894205552921..7bf08fa22ec9ab122bad5438a02bd78db6f1e885 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
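
Another thread can resize the pipe while splice is running, so pipe->buffers may change between the kmalloc in splice_grow_spd() and the kfree in splice_shrink_spd(). The fix is the snapshot pattern: read the racy value once, record it in the descriptor, and size every later access off that snapshot. In outline:

	unsigned int buffers = ACCESS_ONCE(pipe->buffers);	/* single racy read */

	spd->nr_pages_max = buffers;	/* shrink/iterate use this snapshot, */
					/* never a fresh pipe->buffers read  */
	if (buffers > PIPE_DEF_BUFFERS)
		spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
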
 
index ba43f408baa38907a8f63a64314e07bb622a6e7f..07954b05b86cc3ac9c4f72aa97584b551914631e 100644 (file)
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
index 176a939d15474cf73baff4b1d0020c2e1212d19b..1aff18346c7184e13ef2d8d5e7e15afee8dfb60b 100644 (file)
@@ -207,6 +207,9 @@ struct ftrace_event_call {
         *   bit 1:             enabled
         *   bit 2:             filter_active
         *   bit 3:             enabled cmd record
+        *   bit 4:             allow trace by non root (cap any)
+        *   bit 5:             failed to apply filter
+        *   bit 6:             ftrace internal event (do not enable)
         *
         * Changes to flags must hold the event_mutex.
         *
index ab741b0d007402daf8feee824e0aae91751bafe6..5f187026b8126d8250c72f941ed6d588f01e9dca 100644 (file)
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB7  0x3c27
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB8  0x3c2e
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB9  0x3c2f
+#define PCI_DEVICE_ID_INTEL_UNC_HA     0x3c46
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0   0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1   0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2   0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3   0x3cb5
+#define PCI_DEVICE_ID_INTEL_UNC_QPI0   0x3c41
+#define PCI_DEVICE_ID_INTEL_UNC_QPI1   0x3c42
+#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX      0x3ce0
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB   0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16    0x65f0
 #define PCI_DEVICE_ID_INTEL_5100_21    0x65f5
index 45db49f64bb492ddf34d2b451fffa810ea45b965..76c5c8b724a77253e3ca635c90e53ed6230b3c5f 100644 (file)
@@ -677,6 +677,7 @@ struct hw_perf_event {
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
+                       int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
 
@@ -1106,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+                               int src_cpu, int dst_cpu);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
 
index 4059c0f33f07a7454d545753281898d92e2d657f..c7cfa6996db47602b00a09d38c68d3d956759265 100644 (file)
@@ -1581,7 +1581,6 @@ struct task_struct {
 #endif
 #ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
-       int uprobe_srcu_id;
 #endif
 };
 
index 26e5b613deda9e63816b58c59f53f06d7dccd50c..09a545a7dfa39bcd7f736f446358d4c3b112aae4 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
index e4652fe5895863d956a52e436d10f1fc3724415b..fecdf31816f2fc6a834a060cfdd39158dcb30bc2 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc14ccfde5bd85c3b08f990487af66ab072..2c5d2b4d5d1eb51542df26094700250ab6191070 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
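
The paired sack_generation fields give a cheap staleness test: the association-side counter advances each time a SACK is produced, and every transport records the generation it last participated in, so a comparison answers "has this transport contributed since the last SACK?" without rescanning the tsnmap. A hedged sketch of that comparison (the helper name is hypothetical; the actual bookkeeping lives in the sctp .c files, which this merge does not show):

	/* Hypothetical predicate built on the new fields. */
	static bool transport_sacked_this_generation(const struct sctp_transport *t,
						     const struct sctp_association *asoc)
	{
		return t->sack_generation == asoc->peer.sack_generation;
	}
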
index d7d71d6ec97278cf60fa5b13e79a80bff27b6af2..f1cf0edeb39afa1a3f6543a2f383ac8c4828127b 100644 (file)
@@ -1645,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
+       if (event->cpu != -1)
+               event->cpu = cpu;
 
        if (!task) {
                /*
@@ -6252,6 +6254,8 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        }
 
+       get_online_cpus();
+
        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
                                 NULL, NULL);
        if (IS_ERR(event)) {
@@ -6304,7 +6308,7 @@ SYSCALL_DEFINE5(perf_event_open,
        /*
         * Get the target context (task or percpu):
         */
-       ctx = find_get_context(pmu, task, cpu);
+       ctx = find_get_context(pmu, task, event->cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_alloc;
@@ -6377,20 +6381,23 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_lock(&ctx->mutex);
 
        if (move_group) {
-               perf_install_in_context(ctx, group_leader, cpu);
+               synchronize_rcu();
+               perf_install_in_context(ctx, group_leader, event->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_install_in_context(ctx, sibling, cpu);
+                       perf_install_in_context(ctx, sibling, event->cpu);
                        get_ctx(ctx);
                }
        }
 
-       perf_install_in_context(ctx, event, cpu);
+       perf_install_in_context(ctx, event, event->cpu);
        ++ctx->generation;
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
+       put_online_cpus();
+
        event->owner = current;
 
        mutex_lock(&current->perf_event_mutex);
@@ -6419,6 +6426,7 @@ err_context:
 err_alloc:
        free_event(event);
 err_task:
+       put_online_cpus();
        if (task)
                put_task_struct(task);
 err_group_fd:
@@ -6479,6 +6487,39 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+{
+       struct perf_event_context *src_ctx;
+       struct perf_event_context *dst_ctx;
+       struct perf_event *event, *tmp;
+       LIST_HEAD(events);
+
+       src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
+       dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
+
+       mutex_lock(&src_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+                                event_entry) {
+               perf_remove_from_context(event);
+               put_ctx(src_ctx);
+               list_add(&event->event_entry, &events);
+       }
+       mutex_unlock(&src_ctx->mutex);
+
+       synchronize_rcu();
+
+       mutex_lock(&dst_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &events, event_entry) {
+               list_del(&event->event_entry);
+               if (event->state >= PERF_EVENT_STATE_OFF)
+                       event->state = PERF_EVENT_STATE_INACTIVE;
+               perf_install_in_context(dst_ctx, event, dst_cpu);
+               get_ctx(dst_ctx);
+       }
+       mutex_unlock(&dst_ctx->mutex);
+}
+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+
 static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
 {
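
perf_pmu_migrate_context() exists so a PMU whose events are bound to one CPU of a larger domain (the new uncore driver is the in-tree user) can re-home them when that CPU goes offline. A hedged sketch of the intended call site; the hotplug plumbing and pick_online_sibling() are illustrative, not the driver's exact code:

	/* Illustrative CPU-offline handler for a per-socket PMU. */
	static void example_uncore_cpu_exit(struct pmu *pmu, int dying_cpu)
	{
		int target = pick_online_sibling(dying_cpu);	/* hypothetical */

		if (target < 0)
			return;		/* last CPU in the socket: nothing to re-home */

		/* Detach every event from dying_cpu's context, reattach on target. */
		perf_pmu_migrate_context(pmu, dying_cpu, target);
	}
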
index 985be4d80fe8188da34ee47be809faf7b0801ee9..f93532748bca38340f0f2d196b54324a03590480 100644 (file)
 #define UINSNS_PER_PAGE                        (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
 #define MAX_UPROBE_XOL_SLOTS           UINSNS_PER_PAGE
 
-static struct srcu_struct uprobes_srcu;
 static struct rb_root uprobes_tree = RB_ROOT;
 
 static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
 
 #define UPROBES_HASH_SZ        13
 
+/*
+ * We need separate register/unregister and mmap/munmap lock hashes because
+ * of mmap_sem nesting.
+ *
+ * uprobe_register() needs to install probes on (potentially) all processes
+ * and thus needs to acquire multiple mmap_sems (consecutively, not
+ * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
+ * for the particular process doing the mmap.
+ *
+ * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
+ * because of lock order against i_mmap_mutex. This means there's a hole in
+ * the register vma iteration where a mmap() can happen.
+ *
+ * Thus uprobe_register() can race with uprobe_mmap() and we can try to
+ * install a probe where one is already installed.
+ */
+
 /* serialize (un)register */
 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
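
The comment above motivates two independent lock hashes; the bucketing itself keys on the inode so unrelated files rarely contend for the same mutex. A sketch of the idiom (uprobes_bucket() is a hypothetical name for what the file implements as a macro):

	/* One lock per bucket; distinct inodes usually map to distinct locks. */
	static struct mutex *uprobes_bucket(struct inode *inode)
	{
		return &uprobes_mutex[((unsigned long)inode) % UPROBES_HASH_SZ];
	}

	mutex_lock(uprobes_bucket(inode));	/* serialize (un)register per inode */
	/* ... install or remove probes for this inode ... */
	mutex_unlock(uprobes_bucket(inode));
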
 
@@ -61,17 +77,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  */
 static atomic_t uprobe_events = ATOMIC_INIT(0);
 
-/*
- * Maintain a temporary per vma info that can be used to search if a vma
- * has already been handled. This structure is introduced since extending
- * vm_area_struct wasnt recommended.
- */
-struct vma_info {
-       struct list_head        probe_list;
-       struct mm_struct        *mm;
-       loff_t                  vaddr;
-};
-
 struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
@@ -100,7 +105,8 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
        if (!is_register)
                return true;
 
-       if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
+       if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
+                               == (VM_READ|VM_EXEC))
                return true;
 
        return false;
@@ -129,33 +135,17 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
 static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
 {
        struct mm_struct *mm = vma->vm_mm;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep;
-       spinlock_t *ptl;
        unsigned long addr;
-       int err = -EFAULT;
+       spinlock_t *ptl;
+       pte_t *ptep;
 
        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
-               goto out;
-
-       pgd = pgd_offset(mm, addr);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, addr);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, addr);
-       if (!pmd_present(*pmd))
-               goto out;
+               return -EFAULT;
 
-       ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
-               goto out;
+               return -EAGAIN;
 
        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr);
@@ -174,10 +164,8 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
                try_to_free_swap(page);
        put_page(page);
        pte_unmap_unlock(ptep, ptl);
-       err = 0;
 
-out:
-       return err;
+       return 0;
 }
 
 /**
@@ -222,9 +210,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        void *vaddr_old, *vaddr_new;
        struct vm_area_struct *vma;
        struct uprobe *uprobe;
-       loff_t addr;
        int ret;
-
+retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
        if (ret <= 0)
@@ -246,10 +233,6 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        if (mapping != vma->vm_file->f_mapping)
                goto put_out;
 
-       addr = vma_address(vma, uprobe->offset);
-       if (vaddr != (unsigned long)addr)
-               goto put_out;
-
        ret = -ENOMEM;
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
        if (!new_page)
@@ -267,11 +250,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        vaddr_new = kmap_atomic(new_page);
 
        memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
-
-       /* poke the new insn in, ASSUMES we don't cross page boundary */
-       vaddr &= ~PAGE_MASK;
-       BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-       memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+       memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
 
        kunmap_atomic(vaddr_new);
        kunmap_atomic(vaddr_old);
@@ -291,6 +270,8 @@ unlock_out:
 put_out:
        put_page(old_page);
 
+       if (unlikely(ret == -EAGAIN))
+               goto retry;
        return ret;
 }
 
@@ -312,7 +293,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
        void *vaddr_new;
        int ret;
 
-       ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
+       ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
        if (ret <= 0)
                return ret;
 
@@ -333,10 +314,20 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
        uprobe_opcode_t opcode;
        int result;
 
+       if (current->mm == mm) {
+               pagefault_disable();
+               result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+                                                               sizeof(opcode));
+               pagefault_enable();
+
+               if (likely(result == 0))
+                       goto out;
+       }
+
        result = read_opcode(mm, vaddr, &opcode);
        if (result)
                return result;
-
+out:
        if (is_swbp_insn(&opcode))
                return 1;
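The current->mm fast path above is the standard idiom for an opportunistic user-space read: disable page faults, attempt an atomic copy, and fall back to the get_user_pages()-based read_opcode() if the page is not resident. Stand-alone, the idiom looks like this (helper name hypothetical):

    /* Sketch: lockless peek at user memory of the current task.
     * Returns 0 and fills *opcode, or non-zero if it would fault. */
    static int example_peek_opcode(unsigned long vaddr, uprobe_opcode_t *opcode)
    {
            int ret;

            pagefault_disable();
            ret = __copy_from_user_inatomic(opcode, (void __user *)vaddr,
                                            sizeof(*opcode));
            pagefault_enable();

            return ret;
    }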
 
@@ -355,7 +346,9 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
        int result;
-
+       /*
+        * See the comment near uprobes_hash().
+        */
        result = is_swbp_at_addr(mm, vaddr);
        if (result == 1)
                return -EEXIST;
@@ -520,7 +513,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
        init_rwsem(&uprobe->consumer_rwsem);
-       INIT_LIST_HEAD(&uprobe->pending_list);
 
        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
@@ -588,20 +580,22 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 }
 
 static int
-__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
-                       unsigned long nbytes, unsigned long offset)
+__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
+                       unsigned long nbytes, loff_t offset)
 {
-       struct file *filp = vma->vm_file;
        struct page *page;
        void *vaddr;
-       unsigned long off1;
-       unsigned long idx;
+       unsigned long off;
+       pgoff_t idx;
 
        if (!filp)
                return -EINVAL;
 
-       idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
-       off1 = offset &= ~PAGE_MASK;
+       if (!mapping->a_ops->readpage)
+               return -EIO;
+
+       idx = offset >> PAGE_CACHE_SHIFT;
+       off = offset & ~PAGE_MASK;
 
        /*
         * Ensure that the page that has the original instruction is
@@ -612,22 +606,20 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *ins
                return PTR_ERR(page);
 
        vaddr = kmap_atomic(page);
-       memcpy(insn, vaddr + off1, nbytes);
+       memcpy(insn, vaddr + off, nbytes);
        kunmap_atomic(vaddr);
        page_cache_release(page);
 
        return 0;
 }
 
-static int
-copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct file *filp)
 {
        struct address_space *mapping;
        unsigned long nbytes;
        int bytes;
 
-       addr &= ~PAGE_MASK;
-       nbytes = PAGE_SIZE - addr;
+       nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
        mapping = uprobe->inode->i_mapping;
 
        /* Instruction at end of binary; copy only available bytes */
@@ -638,13 +630,13 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 
        /* Instruction at the page-boundary; copy bytes in second page */
        if (nbytes < bytes) {
-               if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
-                               bytes - nbytes, uprobe->offset + nbytes))
-                       return -ENOMEM;
-
+               int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
+                               bytes - nbytes, uprobe->offset + nbytes);
+               if (err)
+                       return err;
                bytes = nbytes;
        }
-       return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
+       return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
 /*
@@ -672,9 +664,8 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
  */
 static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
-                       struct vm_area_struct *vma, loff_t vaddr)
+                       struct vm_area_struct *vma, unsigned long vaddr)
 {
-       unsigned long addr;
        int ret;
 
        /*
@@ -687,20 +678,22 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
        if (!uprobe->consumers)
                return -EEXIST;
 
-       addr = (unsigned long)vaddr;
-
        if (!(uprobe->flags & UPROBE_COPY_INSN)) {
-               ret = copy_insn(uprobe, vma, addr);
+               ret = copy_insn(uprobe, vma->vm_file);
                if (ret)
                        return ret;
 
                if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
-                       return -EEXIST;
+                       return -ENOTSUPP;
 
-               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
+               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
                if (ret)
                        return ret;
 
+               /* write_opcode() assumes we don't cross page boundary */
+               BUG_ON((uprobe->offset & ~PAGE_MASK) +
+                               UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
                uprobe->flags |= UPROBE_COPY_INSN;
        }
 
@@ -713,7 +706,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
         * Hence increment before and decrement on failure.
         */
        atomic_inc(&mm->uprobes_state.count);
-       ret = set_swbp(&uprobe->arch, mm, addr);
+       ret = set_swbp(&uprobe->arch, mm, vaddr);
        if (ret)
                atomic_dec(&mm->uprobes_state.count);
 
@@ -721,27 +714,21 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 }
 
 static void
-remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-       if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
+       if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
                atomic_dec(&mm->uprobes_state.count);
 }
 
 /*
- * There could be threads that have hit the breakpoint and are entering the
- * notifier code and trying to acquire the uprobes_treelock. The thread
- * calling delete_uprobe() that is removing the uprobe from the rb_tree can
- * race with these threads and might acquire the uprobes_treelock compared
- * to some of the breakpoint hit threads. In such a case, the breakpoint
- * hit threads will not find the uprobe. The current unregistering thread
- * waits till all other threads have hit a breakpoint, to acquire the
- * uprobes_treelock before the uprobe is removed from the rbtree.
+ * There could be threads that have already hit the breakpoint. They
+ * will recheck the current insn and restart if find_uprobe() fails.
+ * See find_active_uprobe().
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
        unsigned long flags;
 
-       synchronize_srcu(&uprobes_srcu);
        spin_lock_irqsave(&uprobes_treelock, flags);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -750,139 +737,135 @@ static void delete_uprobe(struct uprobe *uprobe)
        atomic_dec(&uprobe_events);
 }
 
-static struct vma_info *
-__find_next_vma_info(struct address_space *mapping, struct list_head *head,
-                       struct vma_info *vi, loff_t offset, bool is_register)
+struct map_info {
+       struct map_info *next;
+       struct mm_struct *mm;
+       unsigned long vaddr;
+};
+
+static inline struct map_info *free_map_info(struct map_info *info)
+{
+       struct map_info *next = info->next;
+       kfree(info);
+       return next;
+}
+
+static struct map_info *
+build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 {
+       unsigned long pgoff = offset >> PAGE_SHIFT;
        struct prio_tree_iter iter;
        struct vm_area_struct *vma;
-       struct vma_info *tmpvi;
-       unsigned long pgoff;
-       int existing_vma;
-       loff_t vaddr;
-
-       pgoff = offset >> PAGE_SHIFT;
+       struct map_info *curr = NULL;
+       struct map_info *prev = NULL;
+       struct map_info *info;
+       int more = 0;
 
+ again:
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (!valid_vma(vma, is_register))
                        continue;
 
-               existing_vma = 0;
-               vaddr = vma_address(vma, offset);
-
-               list_for_each_entry(tmpvi, head, probe_list) {
-                       if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
-                               existing_vma = 1;
-                               break;
-                       }
+               if (!prev && !more) {
+                       /*
+                        * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+                        * reclaim. This is optimistic; no harm done if it fails.
+                        */
+                       prev = kmalloc(sizeof(struct map_info),
+                                       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
+                       if (prev)
+                               prev->next = NULL;
                }
-
-               /*
-                * Another vma needs a probe to be installed. However skip
-                * installing the probe if the vma is about to be unlinked.
-                */
-               if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
-                       vi->mm = vma->vm_mm;
-                       vi->vaddr = vaddr;
-                       list_add(&vi->probe_list, head);
-
-                       return vi;
+               if (!prev) {
+                       more++;
+                       continue;
                }
-       }
 
-       return NULL;
-}
-
-/*
- * Iterate in the rmap prio tree  and find a vma where a probe has not
- * yet been inserted.
- */
-static struct vma_info *
-find_next_vma_info(struct address_space *mapping, struct list_head *head,
-               loff_t offset, bool is_register)
-{
-       struct vma_info *vi, *retvi;
+               if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+                       continue;
 
-       vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
-       if (!vi)
-               return ERR_PTR(-ENOMEM);
+               info = prev;
+               prev = prev->next;
+               info->next = curr;
+               curr = info;
 
-       mutex_lock(&mapping->i_mmap_mutex);
-       retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
+               info->mm = vma->vm_mm;
+               info->vaddr = vma_address(vma, offset);
+       }
        mutex_unlock(&mapping->i_mmap_mutex);
 
-       if (!retvi)
-               kfree(vi);
+       if (!more)
+               goto out;
+
+       prev = curr;
+       while (curr) {
+               mmput(curr->mm);
+               curr = curr->next;
+       }
 
-       return retvi;
+       do {
+               info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
+               if (!info) {
+                       curr = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
+               info->next = prev;
+               prev = info;
+       } while (--more);
+
+       goto again;
+ out:
+       while (prev)
+               prev = free_map_info(prev);
+       return curr;
 }
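Abstracted from the details, build_map_info() is a two-phase allocator: while i_mmap_mutex is held it only takes GFP_NOWAIT allocations (a blocking allocation could recurse through reclaim back into i_mmap_mutex), counts any shortfall, then drops the lock, tops up with GFP_KERNEL and rescans. Schematically (identifiers invented for illustration, not the real code):

    again:
            lock(shared);
            for_each_item(item) {
                    node = pool_take() ?: kmalloc(sizeof(*node), GFP_NOWAIT);
                    if (!node) {
                            shortfall++;            /* retry later */
                            continue;
                    }
                    collect(node, item);
            }
            unlock(shared);

            if (shortfall) {
                    drop_collected();               /* must rescan anyway */
                    while (shortfall--)
                            pool_add(kmalloc(sizeof(*node), GFP_KERNEL));
                    goto again;
            }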
 
 static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 {
-       struct list_head try_list;
-       struct vm_area_struct *vma;
-       struct address_space *mapping;
-       struct vma_info *vi, *tmpvi;
-       struct mm_struct *mm;
-       loff_t vaddr;
-       int ret;
+       struct map_info *info;
+       int err = 0;
 
-       mapping = uprobe->inode->i_mapping;
-       INIT_LIST_HEAD(&try_list);
+       info = build_map_info(uprobe->inode->i_mapping,
+                                       uprobe->offset, is_register);
+       if (IS_ERR(info))
+               return PTR_ERR(info);
 
-       ret = 0;
+       while (info) {
+               struct mm_struct *mm = info->mm;
+               struct vm_area_struct *vma;
 
-       for (;;) {
-               vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
-               if (!vi)
-                       break;
+               if (err)
+                       goto free;
 
-               if (IS_ERR(vi)) {
-                       ret = PTR_ERR(vi);
-                       break;
-               }
+               down_write(&mm->mmap_sem);
+               vma = find_vma(mm, (unsigned long)info->vaddr);
+               if (!vma || !valid_vma(vma, is_register))
+                       goto unlock;
 
-               mm = vi->mm;
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, (unsigned long)vi->vaddr);
-               if (!vma || !valid_vma(vma, is_register)) {
-                       list_del(&vi->probe_list);
-                       kfree(vi);
-                       up_read(&mm->mmap_sem);
-                       mmput(mm);
-                       continue;
-               }
-               vaddr = vma_address(vma, uprobe->offset);
                if (vma->vm_file->f_mapping->host != uprobe->inode ||
-                                               vaddr != vi->vaddr) {
-                       list_del(&vi->probe_list);
-                       kfree(vi);
-                       up_read(&mm->mmap_sem);
-                       mmput(mm);
-                       continue;
-               }
-
-               if (is_register)
-                       ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
-               else
-                       remove_breakpoint(uprobe, mm, vi->vaddr);
+                   vma_address(vma, uprobe->offset) != info->vaddr)
+                       goto unlock;
 
-               up_read(&mm->mmap_sem);
-               mmput(mm);
                if (is_register) {
-                       if (ret && ret == -EEXIST)
-                               ret = 0;
-                       if (ret)
-                               break;
+                       err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+                       /*
+                        * We can race against uprobe_mmap(), see the
+                        * comment near uprobes_hash().
+                        */
+                       if (err == -EEXIST)
+                               err = 0;
+               } else {
+                       remove_breakpoint(uprobe, mm, info->vaddr);
                }
+ unlock:
+               up_write(&mm->mmap_sem);
+ free:
+               mmput(mm);
+               info = free_map_info(info);
        }
 
-       list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
-               list_del(&vi->probe_list);
-               kfree(vi);
-       }
-
-       return ret;
+       return err;
 }
 
 static int __uprobe_register(struct uprobe *uprobe)
@@ -1048,7 +1031,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
 int uprobe_mmap(struct vm_area_struct *vma)
 {
        struct list_head tmp_list;
-       struct uprobe *uprobe, *u;
+       struct uprobe *uprobe;
        struct inode *inode;
        int ret, count;
 
@@ -1066,12 +1049,9 @@ int uprobe_mmap(struct vm_area_struct *vma)
        ret = 0;
        count = 0;
 
-       list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
+       list_for_each_entry(uprobe, &tmp_list, pending_list) {
                if (!ret) {
-                       vaddr = vma_address(vma, uprobe->offset);
+                       loff_t vaddr = vma_address(vma, uprobe->offset);
 
                        if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
                                put_uprobe(uprobe);
@@ -1079,8 +1059,10 @@ int uprobe_mmap(struct vm_area_struct *vma)
                        }
 
                        ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
-
-                       /* Ignore double add: */
+                       /*
+                        * We can race against uprobe_register(), see the
+                        * comment near uprobes_hash().
+                        */
                        if (ret == -EEXIST) {
                                ret = 0;
 
@@ -1115,7 +1097,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
        struct list_head tmp_list;
-       struct uprobe *uprobe, *u;
+       struct uprobe *uprobe;
        struct inode *inode;
 
        if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
@@ -1132,11 +1114,8 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, &tmp_list);
 
-       list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
-               vaddr = vma_address(vma, uprobe->offset);
+       list_for_each_entry(uprobe, &tmp_list, pending_list) {
+               loff_t vaddr = vma_address(vma, uprobe->offset);
 
                if (vaddr >= start && vaddr < end) {
                        /*
@@ -1378,9 +1357,6 @@ void uprobe_free_utask(struct task_struct *t)
 {
        struct uprobe_task *utask = t->utask;
 
-       if (t->uprobe_srcu_id != -1)
-               srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
-
        if (!utask)
                return;
 
@@ -1398,7 +1374,6 @@ void uprobe_free_utask(struct task_struct *t)
 void uprobe_copy_process(struct task_struct *t)
 {
        t->utask = NULL;
-       t->uprobe_srcu_id = -1;
 }
 
 /*
@@ -1417,7 +1392,6 @@ static struct uprobe_task *add_utask(void)
        if (unlikely(!utask))
                return NULL;
 
-       utask->active_uprobe = NULL;
        current->utask = utask;
        return utask;
 }
@@ -1479,41 +1453,64 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
        return false;
 }
 
+static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
+{
+       struct mm_struct *mm = current->mm;
+       struct uprobe *uprobe = NULL;
+       struct vm_area_struct *vma;
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, bp_vaddr);
+       if (vma && vma->vm_start <= bp_vaddr) {
+               if (valid_vma(vma, false)) {
+                       struct inode *inode;
+                       loff_t offset;
+
+                       inode = vma->vm_file->f_mapping->host;
+                       offset = bp_vaddr - vma->vm_start;
+                       offset += (vma->vm_pgoff << PAGE_SHIFT);
+                       uprobe = find_uprobe(inode, offset);
+               }
+
+               if (!uprobe)
+                       *is_swbp = is_swbp_at_addr(mm, bp_vaddr);
+       } else {
+               *is_swbp = -EFAULT;
+       }
+       up_read(&mm->mmap_sem);
+
+       return uprobe;
+}
+
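The inode:offset key used by find_uprobe() is recovered from the trapping address with the usual vma arithmetic, worked through below (a restatement of the code above, with example numbers):

    /*
     * offset = (bp_vaddr - vma->vm_start)        byte offset into the vma
     *        + (vma->vm_pgoff << PAGE_SHIFT)     vma's start offset in file
     *
     * e.g. vm_start = 0x400000, vm_pgoff = 1, PAGE_SHIFT = 12:
     *      bp_vaddr 0x400010  ->  offset = 0x10 + 0x1000 = 0x1010
     */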
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
  */
 static void handle_swbp(struct pt_regs *regs)
 {
-       struct vm_area_struct *vma;
        struct uprobe_task *utask;
        struct uprobe *uprobe;
-       struct mm_struct *mm;
        unsigned long bp_vaddr;
+       int uninitialized_var(is_swbp);
 
-       uprobe = NULL;
        bp_vaddr = uprobe_get_swbp_addr(regs);
-       mm = current->mm;
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, bp_vaddr);
-
-       if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
-               struct inode *inode;
-               loff_t offset;
-
-               inode = vma->vm_file->f_mapping->host;
-               offset = bp_vaddr - vma->vm_start;
-               offset += (vma->vm_pgoff << PAGE_SHIFT);
-               uprobe = find_uprobe(inode, offset);
-       }
-
-       srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
-       current->uprobe_srcu_id = -1;
-       up_read(&mm->mmap_sem);
+       uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
 
        if (!uprobe) {
-               /* No matching uprobe; signal SIGTRAP. */
-               send_sig(SIGTRAP, current, 0);
+               if (is_swbp > 0) {
+                       /* No matching uprobe; signal SIGTRAP. */
+                       send_sig(SIGTRAP, current, 0);
+               } else {
+                       /*
+                        * Either we raced with uprobe_unregister() or we can't
+                        * access this memory. The latter is only possible if
+                        * another thread plays with our ->mm. In both cases
+                        * we can simply restart. If this vma was unmapped we
+                        * can pretend this insn was not executed yet and get
+                        * the (correct) SIGSEGV after restart.
+                        */
+                       instruction_pointer_set(regs, bp_vaddr);
+               }
                return;
        }
 
@@ -1620,7 +1617,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
                utask->state = UTASK_BP_HIT;
 
        set_thread_flag(TIF_UPROBE);
-       current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
 
        return 1;
 }
@@ -1655,7 +1651,6 @@ static int __init init_uprobes(void)
                mutex_init(&uprobes_mutex[i]);
                mutex_init(&uprobes_mmap_mutex[i]);
        }
-       init_srcu_struct(&uprobes_srcu);
 
        return register_die_notifier(&uprobe_exception_nb);
 }
index ab56a1764d4db8c6a5136cd3b636dc330648783b..e8cd2027abbd1e2e0ed6d432974816cef2845a42 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
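This relay.c hunk is one instance of a tree-wide interface change visible throughout this patch: struct splice_pipe_desc grows an nr_pages_max field recording the capacity of the pages/partial arrays, and splice_shrink_spd() drops its pipe argument since the descriptor now knows its own size. The caller-side pattern, sketched with an invented function (the ops pointer stands in for whatever pipe_buf_operations a real caller uses):

    /* Sketch of the updated splice caller convention. */
    static ssize_t example_splice_read(struct pipe_inode_info *pipe,
                                       unsigned int flags)
    {
            struct page *pages[PIPE_DEF_BUFFERS];
            struct partial_page partial[PIPE_DEF_BUFFERS];
            struct splice_pipe_desc spd = {
                    .pages          = pages,
                    .partial        = partial,
                    .nr_pages       = 0,                    /* filled below   */
                    .nr_pages_max   = PIPE_DEF_BUFFERS,     /* array capacity */
                    .flags          = flags,
                    .ops            = &some_pipe_buf_ops,   /* caller-specific */
            };
            ssize_t ret;

            /* ... fill spd.pages[]/spd.partial[], bump spd.nr_pages ... */

            ret = splice_to_pipe(pipe, &spd);
            splice_shrink_spd(&spd);        /* no pipe argument any more */
            return ret;
    }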
index a008663d86c8e740c64c1cc26977a7a199917b9b..b4f20fba09fcc77dc571bdf718bfd04adfb29897 100644 (file)
@@ -312,7 +312,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (ftrace_disabled)
+       if (unlikely(ftrace_disabled))
                return -ENODEV;
 
        if (FTRACE_WARN_ON(ops == &global_ops))
@@ -4299,16 +4299,12 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        ret = __register_ftrace_function(ops);
        if (!ret)
                ret = ftrace_startup(ops, 0);
 
-
- out_unlock:
        mutex_unlock(&ftrace_lock);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(register_ftrace_function);
index 49249c28690dbb21b6914e2907b16af56e1094c9..a7fa0702be1cd5b269ed6f15e9624f4b02968b11 100644 (file)
@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index df611a0e76c55b0d47febf4312874e839114bb13..123b189c732c4142965252d80e9d49745127fe39 100644 (file)
@@ -1325,4 +1325,4 @@ __init static int init_events(void)
 
        return 0;
 }
-device_initcall(init_events);
+early_initcall(init_events);
index a15a466d0d1d14b21bd82de2bfd3df31654420db..4ce02e0673db612befb63def38911d2d2b0638af 100644 (file)
@@ -1594,6 +1594,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1682,7 +1683,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
index 6df214041a5e5bfe85b948b41ce56e15af514860..84f01ba81a34655becfd3be7af5fed76251d21ca 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
index d78671e9d545be838f9ab5140c9ee07fb1309c26..46a3d23d259e51c17140798554874e929145d6bb 100644 (file)
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
index 66e4fcdd1c6b4a6a07a7d26f3a66bc7e43183807..a4bb856de08f528ae564ab68fd2e83e6f779075f 100644 (file)
@@ -1342,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1354,10 +1353,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_stop_poll(sdata);
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1377,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1386,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
index 7bcecf73aafbf9dadb87a2c8a114e3eb92490131..965e6ec0adb6c12393ced183a44463a63e09a9ff 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index 819c342f5b3012b6a4e37f60414fb835fc0041d8..9730882697aaedbab0beee66f0f12a654b97b63a 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
index ee863943c8267286e4b16e52400dd40bf51b4f26..d5d3607ae7bcf5e9704bd189217115cf048aee4b 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
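Replacing ifname_compare() with strcmp() is what allows the `= {}` initializers above to go: the removed helper compared the whole IFNAMSIZ buffer as an array of longs, so both operands had to be fully zero-padded, while strcmp() stops at the NUL terminator. A userspace illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char a[16] = "eth0";            /* tail is zero-padded      */
            char b[16];

            memset(b, 0xAA, sizeof(b));     /* simulate stack garbage   */
            strcpy(b, "eth0");              /* bytes past NUL stay 0xAA */

            /* Whole-buffer comparison disagrees; strcmp() does not. */
            printf("memcmp=%d strcmp=%d\n",
                   memcmp(a, b, sizeof(a)), strcmp(a, b));
            return 0;
    }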
index dd811b8dd97c66a9da387a4e37073c47e8dc4407..d43e3c122f7bdc5b042b92ce74981149922faa78 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
 
index 3e797d1fcb94272ad96b9798e91a6e6467031379..791d56bbd74a7613e4b1d87ba74bcf2a8a135e46 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
index cb2646179e5f8b8d1836499579d1ccfd109fad60..2ab196a9f228ac873238c2a060685bacea62b87a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
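Each hunk in this file applies the same hardening: a length byte taken from the (untrusted) NCI payload is clamped to the capacity of the destination buffer before it is stored and later used in a memcpy(). The pattern in isolation (destination name hypothetical):

    /* Clamp an attacker-controlled length before copying. */
    __u8 len = min_t(__u8, *data++, sizeof(dst_buf));

    memcpy(dst_buf, data, len);
    data += len;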
index ec1134c9e07fcd34fc5d6116e1a4aef9dedf6f23..8b8a6a2b2badaf61e9c71a174809ca989438668f 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 5bc9ab161b373eadf51843ebaa77c527716a2122..b16517ee1aaf7cfad94978ed1181df6432da6f07 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index f1b7d4bb591e9b648865c4e096ef838e77678442..6ae47acaaec65ceb898e10e462454e667a8e739e 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index a85eeeb55dd0022e009895c53a6d8593929b7691..b6de71efb140c538a66e0a7901df2b4402a85bf4 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once we have a sack generated, check what our sack
+        * generation is; if it's 0, reset the transports to 0 and reset
+        * the association generation to 1.
+        *
+        * The idea is that zero is never used as a valid generation for the
+        * association, so no transport will match after a wrap event like
+        * this until the next sack.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
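Taken together, the SCTP hunks implement a generation counter: sctp_make_sack() bumps peer.sack_generation for every SACK built, sctp_tsnmap_mark() stamps the receiving transport with the current generation, and sctp_packet_bundle_sack() only bundles a SACK when the stamps match. Transports start at 0 and the association at 1, and the wrap code above restores exactly that state, so a stale transport can never match by accident. A toy model of the wrap step (sketch, not kernel code):

    /* Toy model of the SACK-generation wrap above. */
    static void toy_bump_generation(unsigned int *assoc_gen,
                                    unsigned int *trans_gen, int ntrans)
    {
            if (++(*assoc_gen) == 0) {      /* counter wrapped */
                    int i;

                    for (i = 0; i < ntrans; i++)
                            trans_gen[i] = 0;
                    *assoc_gen = 1;         /* 0 is never valid */
            }
    }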
index c96d1a81cf4209f6d3ca9b6762fefa47e75867c3..8716da1a859221dc96d206ed517190e9e9cfa2ca 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b026ba0c69922e09eb5c150298f650ea7bb3d1c4..1dcceb6e0ce6c2b9e5f6c0c3abf8075c3e75224c 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40cebc981ae7962bb9cd20ae84823b70d43cf..b5fb7c409023ff8adea81d41e68ace60781febae 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017834c211a840e83c1e39bebdbb001ec5c4..33d894776192205cd4b4a9573ccf70664c723b2d 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7f2ffbd5a219759f55bdc546f2edbf19b0..f5a6a4f4faf721af4874538093cb003f4efc202c 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 3efc9b12aef44016201b02eeafcc10140f17a240..860aeb349cb337bbccf4346d99120d4d1fd51c90 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
index 5ccf10a4d5932b187bba679e40e8f424d4739551..aa4c25e0f3277fce38520c72c9759bfc90b9022d 100644 (file)
@@ -6688,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
@@ -6707,6 +6732,9 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
+
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
        alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
index 64d2a4fa34b27d81ad84d0e64ed625e04e293b7a..e9b62b5ea637580ea7e8904ec2c68662ed1471fc 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
index 6f097fb60683e7b5605b38e373c7f1c75f4158c9..08c7f6685ff0937a367824c7a731fd4fb8e48d72 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
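The aic3x hunk above also fixes a latent bug: the old read/OR/write sequence could never clear previously set P-divider bits, whereas snd_soc_update_bits() masks the field first (which is why PLLP_MASK is introduced here). Conceptually the helper does:

    /* What snd_soc_update_bits(codec, reg, mask, val) amounts to: */
    unsigned int old, new;

    old = snd_soc_read(codec, reg);
    new = (old & ~mask) | (val & mask);     /* clear field, then set */
    if (new != old)
            snd_soc_write(codec, reg, new); /* skip redundant writes */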
index acbdc5fde9236be0e43a5767bd0135eb92450e68..32682c1b7cdece413693db6f6487ae4df640da09 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index a3dbadb26ef596d29f6e2e9c3a5326bff4dfdad7..f3c716a4cad388408d4ec78968017fb2589d04a8 100644 (file)
@@ -12,7 +12,7 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-This 'perf bench' command is general framework for benchmark suites.
+This 'perf bench' command is a general framework for benchmark suites.
 
 COMMON OPTIONS
 --------------
@@ -45,14 +45,20 @@ SUBSYSTEM
 'sched'::
        Scheduler and IPC mechanisms.
 
+'mem'::
+       Memory access performance.
+
+'all'::
+       All benchmark subsystems.
+
 SUITES FOR 'sched'
 ~~~~~~~~~~~~~~~~~~
 *messaging*::
 Suite for evaluating performance of scheduler and IPC mechanisms.
 Based on hackbench by Rusty Russell.
 
-Options of *pipe*
-^^^^^^^^^^^^^^^^^
+Options of *messaging*
+^^^^^^^^^^^^^^^^^^^^^^
 -p::
 --pipe::
 Use pipe() instead of socketpair()
@@ -115,6 +121,72 @@ Example of *pipe*
                 59004 ops/sec
 ---------------------
 
+SUITES FOR 'mem'
+~~~~~~~~~~~~~~~~
+*memcpy*::
+Suite for evaluating performance of simple memory copy in various ways.
+
+Options of *memcpy*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to copy (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to copy (default: default).
+Available routines depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported.
+
+-i::
+--iterations::
+Repeat memcpy invocation this number of times.
+
+-c::
+--clock::
+Use perf's cpu-cycles event instead of gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memcpy.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memcpy.
+
+*memset*::
+Suite for evaluating performance of simple memory set in various ways.
+
+Options of *memset*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to set (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to set (default: default).
+Available routines depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported.
+
+-i::
+--iterations::
+Repeat memset invocation this number of times.
+
+-c::
+--clock::
+Use perf's cpu-cycles event instead of gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memset.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memset.
+
 SEE ALSO
 --------
 linkperf:perf[1]
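For orientation, a plausible invocation of the newly documented mem subsystem, using only the flags described above (output omitted):

    % perf bench mem memcpy -l 1MB -i 10 -o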
index 2d89f02719b5f6ce52e502414c031a2d7610ca9a..495210a612c4d218170f696dcf5d93323f22a571 100644 (file)
@@ -57,7 +57,7 @@ OPTIONS
 
 -s::
 --sort=::
-       Sort by key(s): pid, comm, dso, symbol, parent.
+       Sort by key(s): pid, comm, dso, symbol, parent, srcline.
 
 -p::
 --parent=<regex>::
index 4a5680cb242ebe656c266c19bcf81bc64bbc623c..5b80d84d6b4a3e7e430499d0381c11a8856f316d 100644 (file)
@@ -112,7 +112,7 @@ Default is to monitor all CPUS.
 
 -s::
 --sort::
-       Sort by key(s): pid, comm, dso, symbol, parent
+       Sort by key(s): pid, comm, dso, symbol, parent, srcline.
 
 -n::
 --show-nr-samples::
index 0eee64cfe9a0f48ced0ee2397068979763355cd4..75d74e5db8d552372aa0b7c19861c76458784f5e 100644 (file)
@@ -155,7 +155,7 @@ endif
 
 ### --- END CONFIGURATION SECTION ---
 
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
@@ -503,6 +503,7 @@ else
                LIB_OBJS += $(OUTPUT)ui/progress.o
                LIB_OBJS += $(OUTPUT)ui/util.o
                LIB_OBJS += $(OUTPUT)ui/tui/setup.o
+               LIB_OBJS += $(OUTPUT)ui/tui/util.o
                LIB_H += ui/browser.h
                LIB_H += ui/browsers/map.h
                LIB_H += ui/helpline.h
@@ -522,13 +523,18 @@ else
                msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
                BASIC_CFLAGS += -DNO_GTK2_SUPPORT
        else
+               ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
+                       BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
+               endif
                BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
                EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
                LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
                LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
+               LIB_OBJS += $(OUTPUT)ui/gtk/util.o
                # Make sure that it'd be included only once.
                ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
                        LIB_OBJS += $(OUTPUT)ui/setup.o
+                       LIB_OBJS += $(OUTPUT)ui/util.o
                endif
        endif
 endif
index 71557225bf92aeb708100acfa3f5027948de3831..d990365cafa04b5817540991482aefed1874e34e 100644 (file)
@@ -32,13 +32,13 @@ static bool         no_prefault;
 static const struct option options[] = {
        OPT_STRING('l', "length", &length_str, "1MB",
                    "Specify length of memory to copy. "
-                   "available unit: B, MB, GB (upper and lower)"),
+                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
        OPT_STRING('r', "routine", &routine, "default",
                    "Specify routine to copy"),
        OPT_INTEGER('i', "iterations", &iterations,
                    "repeat memcpy() invocation this number of times"),
        OPT_BOOLEAN('c', "clock", &use_clock,
-                   "Use CPU clock for measuring"),
+                   "Use cycles event instead of gettimeofday() for measuring"),
        OPT_BOOLEAN('o', "only-prefault", &only_prefault,
                    "Show only the result with page faults before memcpy()"),
        OPT_BOOLEAN('n', "no-prefault", &no_prefault,
index e9079185bd72d32e48dfee280ead72c96c7d9ef3..bf0d5f552017537718b87c618f3528673e98baf9 100644 (file)
@@ -31,14 +31,14 @@ static bool         no_prefault;
 
 static const struct option options[] = {
        OPT_STRING('l', "length", &length_str, "1MB",
-                   "Specify length of memory to copy. "
-                   "available unit: B, MB, GB (upper and lower)"),
+                   "Specify length of memory to set. "
+                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
        OPT_STRING('r', "routine", &routine, "default",
-                   "Specify routine to copy"),
+                   "Specify routine to set"),
        OPT_INTEGER('i', "iterations", &iterations,
                    "repeat memset() invocation this number of times"),
        OPT_BOOLEAN('c', "clock", &use_clock,
-                   "Use CPU clock for measuring"),
+                   "Use cycles event instead of gettimeofday() for measuring"),
        OPT_BOOLEAN('o', "only-prefault", &only_prefault,
                    "Show only the result with page faults before memset()"),
        OPT_BOOLEAN('n', "no-prefault", &no_prefault,
index b0e74ab2d7a2990d8d1fce67b95a555d53a70ee3..1f3100216448da9d0d794847a0c0f0b315bd59fb 100644 (file)
@@ -33,7 +33,7 @@ struct bench_suite {
 };
                                                \
 /* sentinel: easy for help */
-#define suite_all { "all", "test all suite (pseudo suite)", NULL }
+#define suite_all { "all", "Test all benchmark suites", NULL }
 
 static struct bench_suite sched_suites[] = {
        { "messaging",
@@ -75,7 +75,7 @@ static struct bench_subsys subsystems[] = {
          "memory access performance",
          mem_suites },
        { "all",                /* sentinel: easy for help */
-         "test all subsystem (pseudo subsystem)",
+         "all benchmark subsystem",
          NULL },
        { NULL,
          NULL,
index acd78dc283411692f9e7e48b78085d9b5da63471..0dd5a058f766e2e2b2ad7196af78bf3d7a81d563 100644 (file)
@@ -60,7 +60,7 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail
        list_for_each_entry(pos, &session->evlist->entries, node) {
                bool first = true;
 
-               printf("%s", event_name(pos));
+               printf("%s", perf_evsel__name(pos));
 
                if (details->verbose || details->freq) {
                        comma_printf(&first, " sample_freq=%" PRIu64,
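
perf_evsel__name() replaces event_name() throughout this series: it takes the
evsel rather than a bare attr and caches the constructed string in
evsel->name, which is what lets the hand-rolled strdup() cache in
ui/browsers/hists.c (further down in this diff) be deleted. A sketch of the
caching shape, with the struct layout and builder helper assumed:

        struct perf_evsel { char *name; /* other fields elided */ };

        /* hypothetical builder, e.g. from attr.type/attr.config */
        static char *perf_evsel__build_name(struct perf_evsel *evsel);

        const char *perf_evsel__name(struct perf_evsel *evsel)
        {
                if (evsel->name == NULL)        /* first call: build and cache */
                        evsel->name = perf_evsel__build_name(evsel);
                return evsel->name;             /* later calls are cheap */
        }
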
index 547af48deb4f99081221f566f48dc023ab6b0e76..ce35015f2dc6423901f7d42b223754b4b67117e3 100644 (file)
@@ -57,6 +57,11 @@ static unsigned long nr_allocs, nr_cross_allocs;
 
 #define PATH_SYS_NODE  "/sys/devices/system/node"
 
+struct perf_kmem {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 static void init_cpunode_map(void)
 {
        FILE *fp;
@@ -278,14 +283,16 @@ static void process_free_event(void *data,
        s_alloc->alloc_cpu = -1;
 }
 
-static void process_raw_event(union perf_event *raw_event __used, void *data,
+static void process_raw_event(struct perf_tool *tool,
+                             union perf_event *raw_event __used, void *data,
                              int cpu, u64 timestamp, struct thread *thread)
 {
+       struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
        struct event_format *event;
        int type;
 
-       type = trace_parse_common_type(data);
-       event = trace_find_event(type);
+       type = trace_parse_common_type(kmem->session->pevent, data);
+       event = pevent_find_event(kmem->session->pevent, type);
 
        if (!strcmp(event->name, "kmalloc") ||
            !strcmp(event->name, "kmem_cache_alloc")) {
@@ -306,7 +313,7 @@ static void process_raw_event(union perf_event *raw_event __used, void *data,
        }
 }
 
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel __used,
@@ -322,16 +329,18 @@ static int process_sample_event(struct perf_tool *tool __used,
 
        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-       process_raw_event(event, sample->raw_data, sample->cpu,
+       process_raw_event(tool, event, sample->raw_data, sample->cpu,
                          sample->time, thread);
 
        return 0;
 }
 
-static struct perf_tool perf_kmem = {
-       .sample                 = process_sample_event,
-       .comm                   = perf_event__process_comm,
-       .ordered_samples        = true,
+static struct perf_kmem perf_kmem = {
+       .tool = {
+               .sample                 = process_sample_event,
+               .comm                   = perf_event__process_comm,
+               .ordered_samples        = true,
+       },
 };
 
 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -486,11 +495,15 @@ static void sort_result(void)
 static int __cmd_kmem(void)
 {
        int err = -EINVAL;
-       struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-                                                        0, false, &perf_kmem);
+       struct perf_session *session;
+
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_kmem.tool);
        if (session == NULL)
                return -ENOMEM;
 
+       perf_kmem.session = session;
+
        if (perf_session__create_kernel_maps(session) < 0)
                goto out_delete;
 
@@ -498,7 +511,7 @@ static int __cmd_kmem(void)
                goto out_delete;
 
        setup_pager();
-       err = perf_session__process_events(session, &perf_kmem);
+       err = perf_session__process_events(session, &perf_kmem.tool);
        if (err != 0)
                goto out_delete;
        sort_result();
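
builtin-kmem is the first of several commands in this diff (sched and script
follow) converted to the same shape: the perf_tool is embedded in a
command-private struct so that callbacks, which only receive the tool pointer,
can reach the session through container_of(). A self-contained sketch of the
pattern, with invented names:

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct tool { int (*sample)(struct tool *tool); };

        struct my_cmd {
                struct tool tool;       /* embedded; handed to generic code */
                const char *session;    /* command-private state */
        };

        static int my_sample(struct tool *tool)
        {
                /* recover the enclosing command struct from the member */
                struct my_cmd *cmd = container_of(tool, struct my_cmd, tool);

                printf("session: %s\n", cmd->session);
                return 0;
        }

        int main(void)
        {
                struct my_cmd cmd = { .tool = { .sample = my_sample },
                                      .session = "perf.data" };
                /* the generic caller only ever sees &cmd.tool */
                return cmd.tool.sample(&cmd.tool);
        }
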
index fd53319de20d68f906536787c48523769e565aba..b3c4285488688ba19b6073bd6c743a84e4e0e8b1 100644 (file)
@@ -724,8 +724,8 @@ process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
        struct event_format *event;
        int type;
 
-       type = trace_parse_common_type(data);
-       event = trace_find_event(type);
+       type = trace_parse_common_type(session->pevent, data);
+       event = pevent_find_event(session->pevent, type);
 
        if (!strcmp(event->name, "lock_acquire"))
                process_lock_acquire_event(data, event, cpu, timestamp, thread);
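
As in builtin-kmem above, the implicit-global tracepoint lookups become
explicit: trace_parse_common_type() and pevent_find_event() now take the
session's pevent handle, so per-session parse state replaces hidden globals.
In fragment form, with session/sample/cpu/timestamp/thread standing in for
what the callback actually receives:

        int type = trace_parse_common_type(session->pevent, sample->raw_data);
        struct event_format *ef = pevent_find_event(session->pevent, type);

        if (ef != NULL && strcmp(ef->name, "lock_acquire") == 0)
                process_lock_acquire_event(sample->raw_data, ef, cpu,
                                           timestamp, thread);
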
index f95840d04e4c7a224821e395600df2bbdc7e4323..f5a6452931e6dabaa4f3b4593ab91f30aa7d586b 100644 (file)
@@ -265,7 +265,7 @@ try_again:
 
                        if (err == ENOENT) {
                                ui__error("The %s event is not supported.\n",
-                                           event_name(pos));
+                                         perf_evsel__name(pos));
                                exit(EXIT_FAILURE);
                        }
 
@@ -916,7 +916,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                usage_with_options(record_usage, record_options);
 
        list_for_each_entry(pos, &evsel_list->entries, node) {
-               if (perf_header__push_event(pos->attr.config, event_name(pos)))
+               if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
                        goto out_free_fd;
        }
 
index 25249f76329daf851f925240f8c295c0c3fc5bdc..69b1c1185159174f070425d15c17afc8d5325f50 100644 (file)
@@ -69,7 +69,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
 
        if ((sort__has_parent || symbol_conf.use_callchain)
            && sample->callchain) {
-               err = machine__resolve_callchain(machine, evsel, al->thread,
+               err = machine__resolve_callchain(machine, al->thread,
                                                 sample->callchain, &parent);
                if (err)
                        return err;
@@ -140,7 +140,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
        struct hist_entry *he;
 
        if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
-               err = machine__resolve_callchain(machine, evsel, al->thread,
+               err = machine__resolve_callchain(machine, al->thread,
                                                 sample->callchain, &parent);
                if (err)
                        return err;
@@ -230,7 +230,7 @@ static int process_read_event(struct perf_tool *tool,
        struct perf_report *rep = container_of(tool, struct perf_report, tool);
 
        if (rep->show_threads) {
-               const char *name = evsel ? event_name(evsel) : "unknown";
+               const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
                perf_read_values_add_value(&rep->show_threads_values,
                                           event->read.pid, event->read.tid,
                                           event->read.id,
@@ -239,17 +239,18 @@ static int process_read_event(struct perf_tool *tool,
        }
 
        dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
-                   evsel ? event_name(evsel) : "FAIL",
+                   evsel ? perf_evsel__name(evsel) : "FAIL",
                    event->read.value);
 
        return 0;
 }
 
+/* For pipe mode, sample_type is not currently set */
 static int perf_report__setup_sample_type(struct perf_report *rep)
 {
        struct perf_session *self = rep->session;
 
-       if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+       if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
                if (sort__has_parent) {
                        ui__error("Selected --sort parent, but no "
                                    "callchain data. Did you call "
@@ -272,7 +273,8 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
        }
 
        if (sort__branch_mode == 1) {
-               if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+               if (!self->fd_pipe &&
+                   !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
                        ui__error("Selected -b but no branch data. "
                                  "Did you call perf record without -b?\n");
                        return -1;
@@ -314,7 +316,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 
        list_for_each_entry(pos, &evlist->entries, node) {
                struct hists *hists = &pos->hists;
-               const char *evname = event_name(pos);
+               const char *evname = perf_evsel__name(pos);
 
                hists__fprintf_nr_sample_events(hists, evname, stdout);
                hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
index b125e07eb39937e314b314943ad4061ba467a98c..7a9ad2b1ee7601de26ed2d97ff91fae39a46f185 100644 (file)
@@ -43,6 +43,11 @@ static u64                   sleep_measurement_overhead;
 
 static unsigned long           nr_tasks;
 
+struct perf_sched {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 struct sched_atom;
 
 struct task_desc {
@@ -1597,11 +1602,13 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
                                                 struct perf_evsel *evsel,
                                                 struct machine *machine)
 {
+       struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+       struct pevent *pevent = sched->session->pevent;
        struct thread *thread = machine__findnew_thread(machine, sample->pid);
 
        if (thread == NULL) {
                pr_debug("problem processing %s event, skipping it.\n",
-                        evsel->name);
+                        perf_evsel__name(evsel));
                return -1;
        }
 
@@ -1612,7 +1619,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
                tracepoint_handler f = evsel->handler.func;
 
                if (evsel->handler.data == NULL)
-                       evsel->handler.data = trace_find_event(evsel->attr.config);
+                       evsel->handler.data = pevent_find_event(pevent,
+                                                         evsel->attr.config);
 
                f(tool, evsel->handler.data, sample, machine, thread);
        }
@@ -1620,12 +1628,14 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
        return 0;
 }
 
-static struct perf_tool perf_sched = {
-       .sample                 = perf_sched__process_tracepoint_sample,
-       .comm                   = perf_event__process_comm,
-       .lost                   = perf_event__process_lost,
-       .fork                   = perf_event__process_task,
-       .ordered_samples        = true,
+static struct perf_sched perf_sched = {
+       .tool = {
+               .sample          = perf_sched__process_tracepoint_sample,
+               .comm            = perf_event__process_comm,
+               .lost            = perf_event__process_lost,
+               .fork            = perf_event__process_task,
+               .ordered_samples = true,
+       },
 };
 
 static void read_events(bool destroy, struct perf_session **psession)
@@ -1640,16 +1650,20 @@ static void read_events(bool destroy, struct perf_session **psession)
                { "sched:sched_process_exit", process_sched_exit_event, },
                { "sched:sched_migrate_task", process_sched_migrate_task_event, },
        };
-       struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-                                                        0, false, &perf_sched);
+       struct perf_session *session;
+
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_sched.tool);
        if (session == NULL)
                die("No Memory");
 
-       err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+       perf_sched.session = session;
+
+       err = perf_session__set_tracepoints_handlers(session, handlers);
        assert(err == 0);
 
        if (perf_session__has_traces(session, "record -R")) {
-               err = perf_session__process_events(session, &perf_sched);
+               err = perf_session__process_events(session, &perf_sched.tool);
                if (err)
                        die("Failed to process events, error %d", err);
 
index 8e395a538eb92b212ee20ac9e475c7ab70f2f191..1e60ab70b2b14789b17a2a7199f5a8d175709242 100644 (file)
@@ -28,6 +28,11 @@ static bool                  system_wide;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 
+struct perf_script {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 enum perf_output_field {
        PERF_OUTPUT_COMM            = 1U << 0,
        PERF_OUTPUT_TID             = 1U << 1,
@@ -137,10 +142,11 @@ static const char *output_field2str(enum perf_output_field field)
 
 #define PRINT_FIELD(x)  (output[attr->type].fields & PERF_OUTPUT_##x)
 
-static int perf_event_attr__check_stype(struct perf_event_attr *attr,
-                                 u64 sample_type, const char *sample_msg,
-                                 enum perf_output_field field)
+static int perf_evsel__check_stype(struct perf_evsel *evsel,
+                                  u64 sample_type, const char *sample_msg,
+                                  enum perf_output_field field)
 {
+       struct perf_event_attr *attr = &evsel->attr;
        int type = attr->type;
        const char *evname;
 
@@ -148,7 +154,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
                return 0;
 
        if (output[type].user_set) {
-               evname = __event_name(attr->type, attr->config);
+               evname = perf_evsel__name(evsel);
                pr_err("Samples for '%s' event do not have %s attribute set. "
                       "Cannot print '%s' field.\n",
                       evname, sample_msg, output_field2str(field));
@@ -157,7 +163,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
 
        /* user did not ask for it explicitly so remove from the default list */
        output[type].fields &= ~field;
-       evname = __event_name(attr->type, attr->config);
+       evname = perf_evsel__name(evsel);
        pr_debug("Samples for '%s' event do not have %s attribute set. "
                 "Skipping '%s' field.\n",
                 evname, sample_msg, output_field2str(field));
@@ -175,8 +181,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
                return -EINVAL;
 
        if (PRINT_FIELD(IP)) {
-               if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP",
-                                          PERF_OUTPUT_IP))
+               if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
+                                           PERF_OUTPUT_IP))
                        return -EINVAL;
 
                if (!no_callchain &&
@@ -185,8 +191,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
        }
 
        if (PRINT_FIELD(ADDR) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR",
-                                      PERF_OUTPUT_ADDR))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
+                                       PERF_OUTPUT_ADDR))
                return -EINVAL;
 
        if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
@@ -208,18 +214,18 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
        }
 
        if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID",
-                                      PERF_OUTPUT_TID|PERF_OUTPUT_PID))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
+                                       PERF_OUTPUT_TID|PERF_OUTPUT_PID))
                return -EINVAL;
 
        if (PRINT_FIELD(TIME) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME",
-                                      PERF_OUTPUT_TIME))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME",
+                                       PERF_OUTPUT_TIME))
                return -EINVAL;
 
        if (PRINT_FIELD(CPU) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU",
-                                      PERF_OUTPUT_CPU))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
+                                       PERF_OUTPUT_CPU))
                return -EINVAL;
 
        return 0;
@@ -256,11 +262,13 @@ static int perf_session__check_output_opt(struct perf_session *session)
        return 0;
 }
 
-static void print_sample_start(struct perf_sample *sample,
+static void print_sample_start(struct pevent *pevent,
+                              struct perf_sample *sample,
                               struct thread *thread,
-                              struct perf_event_attr *attr)
+                              struct perf_evsel *evsel)
 {
        int type;
+       struct perf_event_attr *attr = &evsel->attr;
        struct event_format *event;
        const char *evname = NULL;
        unsigned long secs;
@@ -300,12 +308,18 @@ static void print_sample_start(struct perf_sample *sample,
 
        if (PRINT_FIELD(EVNAME)) {
                if (attr->type == PERF_TYPE_TRACEPOINT) {
-                       type = trace_parse_common_type(sample->raw_data);
-                       event = trace_find_event(type);
+                       /*
+                        * XXX Do we really need this here?
+                        * perf_evlist__set_tracepoint_names should have done
+                        * this already
+                        */
+                       type = trace_parse_common_type(pevent,
+                                                      sample->raw_data);
+                       event = pevent_find_event(pevent, type);
                        if (event)
                                evname = event->name;
                } else
-                       evname = __event_name(attr->type, attr->config);
+                       evname = perf_evsel__name(evsel);
 
                printf("%s: ", evname ? evname : "[unknown]");
        }
@@ -387,7 +401,7 @@ static void print_sample_bts(union perf_event *event,
                        printf(" ");
                else
                        printf("\n");
-               perf_event__print_ip(event, sample, machine, evsel,
+               perf_event__print_ip(event, sample, machine,
                                     PRINT_FIELD(SYM), PRINT_FIELD(DSO),
                                     PRINT_FIELD(SYMOFFSET));
        }
@@ -402,6 +416,7 @@ static void print_sample_bts(union perf_event *event,
 }
 
 static void process_event(union perf_event *event __unused,
+                         struct pevent *pevent,
                          struct perf_sample *sample,
                          struct perf_evsel *evsel,
                          struct machine *machine,
@@ -412,7 +427,7 @@ static void process_event(union perf_event *event __unused,
        if (output[attr->type].fields == 0)
                return;
 
-       print_sample_start(sample, thread, attr);
+       print_sample_start(pevent, sample, thread, evsel);
 
        if (is_bts_event(attr)) {
                print_sample_bts(event, sample, evsel, machine, thread);
@@ -420,7 +435,7 @@ static void process_event(union perf_event *event __unused,
        }
 
        if (PRINT_FIELD(TRACE))
-               print_trace_event(sample->cpu, sample->raw_data,
+               print_trace_event(pevent, sample->cpu, sample->raw_data,
                                  sample->raw_size);
 
        if (PRINT_FIELD(ADDR))
@@ -431,7 +446,7 @@ static void process_event(union perf_event *event __unused,
                        printf(" ");
                else
                        printf("\n");
-               perf_event__print_ip(event, sample, machine, evsel,
+               perf_event__print_ip(event, sample, machine,
                                     PRINT_FIELD(SYM), PRINT_FIELD(DSO),
                                     PRINT_FIELD(SYMOFFSET));
        }
@@ -451,7 +466,8 @@ static int default_stop_script(void)
        return 0;
 }
 
-static int default_generate_script(const char *outfile __unused)
+static int default_generate_script(struct pevent *pevent __unused,
+                                  const char *outfile __unused)
 {
        return 0;
 }
@@ -489,6 +505,7 @@ static int process_sample_event(struct perf_tool *tool __used,
                                struct machine *machine)
 {
        struct addr_location al;
+       struct perf_script *scr = container_of(tool, struct perf_script, tool);
        struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
 
        if (thread == NULL) {
@@ -520,24 +537,27 @@ static int process_sample_event(struct perf_tool *tool __used,
        if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
                return 0;
 
-       scripting_ops->process_event(event, sample, evsel, machine, thread);
+       scripting_ops->process_event(event, scr->session->pevent,
+                                    sample, evsel, machine, thread);
 
        evsel->hists.stats.total_period += sample->period;
        return 0;
 }
 
-static struct perf_tool perf_script = {
-       .sample          = process_sample_event,
-       .mmap            = perf_event__process_mmap,
-       .comm            = perf_event__process_comm,
-       .exit            = perf_event__process_task,
-       .fork            = perf_event__process_task,
-       .attr            = perf_event__process_attr,
-       .event_type      = perf_event__process_event_type,
-       .tracing_data    = perf_event__process_tracing_data,
-       .build_id        = perf_event__process_build_id,
-       .ordered_samples = true,
-       .ordering_requires_timestamps = true,
+static struct perf_script perf_script = {
+       .tool = {
+               .sample          = process_sample_event,
+               .mmap            = perf_event__process_mmap,
+               .comm            = perf_event__process_comm,
+               .exit            = perf_event__process_task,
+               .fork            = perf_event__process_task,
+               .attr            = perf_event__process_attr,
+               .event_type      = perf_event__process_event_type,
+               .tracing_data    = perf_event__process_tracing_data,
+               .build_id        = perf_event__process_build_id,
+               .ordered_samples = true,
+               .ordering_requires_timestamps = true,
+       },
 };
 
 extern volatile int session_done;
@@ -553,7 +573,7 @@ static int __cmd_script(struct perf_session *session)
 
        signal(SIGINT, sig_handler);
 
-       ret = perf_session__process_events(session, &perf_script);
+       ret = perf_session__process_events(session, &perf_script.tool);
 
        if (debug_mode)
                pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -1335,10 +1355,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
        if (!script_name)
                setup_pager();
 
-       session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_script.tool);
        if (session == NULL)
                return -ENOMEM;
 
+       perf_script.session = session;
+
        if (cpu_list) {
                if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
                        return -1;
@@ -1384,7 +1407,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
                        return -1;
                }
 
-               err = scripting_ops->generate_script("perf-script");
+               err = scripting_ops->generate_script(session->pevent,
+                                                    "perf-script");
                goto out;
        }
 
index 07b5c7703dd10677e12a5b6e36c8603f18fd0f7e..861f0aec77aeacf099653d87ca9c6df9b6f2787c 100644 (file)
@@ -391,7 +391,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
 
        if (verbose) {
                fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-                       event_name(counter), count[0], count[1], count[2]);
+                       perf_evsel__name(counter), count[0], count[1], count[2]);
        }
 
        /*
@@ -496,7 +496,7 @@ static int run_perf_stat(int argc __used, const char **argv)
                            errno == ENXIO) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
-                                                   event_name(counter));
+                                                   perf_evsel__name(counter));
                                counter->supported = false;
                                continue;
                        }
@@ -594,7 +594,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
                        csv_output ? 0 : -4,
                        evsel_list->cpus->map[cpu], csv_sep);
 
-       fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel));
+       fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
 
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -792,7 +792,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        else
                cpu = 0;
 
-       fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel));
+       fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel));
 
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -908,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
                        counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                        csv_sep,
                        csv_output ? 0 : -24,
-                       event_name(counter));
+                       perf_evsel__name(counter));
 
                if (counter->cgrp)
                        fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
@@ -961,7 +961,7 @@ static void print_counter(struct perf_evsel *counter)
                                counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                                csv_sep,
                                csv_output ? 0 : -24,
-                               event_name(counter));
+                               perf_evsel__name(counter));
 
                        if (counter->cgrp)
                                fprintf(output, "%s%s",
index 5a8727c0875772c9c37c3fc381b1812161c3b983..5ce30305462b1775fbfc80f9569e29328d25edd3 100644 (file)
@@ -583,7 +583,7 @@ static int test__basic_mmap(void)
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
-                                event_name(evsel), nr_events[evsel->idx]);
+                                perf_evsel__name(evsel), nr_events[evsel->idx]);
                        goto out_munmap;
                }
        }
index 6bb0277b7dfecdbf63e727592995480a409f16f5..e3cab5f088f8def4264a0ec65dba4f85b0b0f954 100644 (file)
@@ -245,7 +245,7 @@ static void perf_top__show_details(struct perf_top *top)
        if (notes->src == NULL)
                goto out_unlock;
 
-       printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+       printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
        printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
 
        more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
@@ -408,7 +408,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
        fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
 
        if (top->evlist->nr_entries > 1)
-               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top->sym_evsel));
+               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));
 
        fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
 
@@ -503,13 +503,13 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
                                fprintf(stderr, "\nAvailable events:");
 
                                list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
-                                       fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
+                                       fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
 
                                prompt_integer(&counter, "Enter details event counter");
 
                                if (counter >= top->evlist->nr_entries) {
                                        top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
-                                       fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
+                                       fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
                                        sleep(1);
                                        break;
                                }
@@ -774,7 +774,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
                if ((sort__has_parent || symbol_conf.use_callchain) &&
                    sample->callchain) {
-                       err = machine__resolve_callchain(machine, evsel, al.thread,
+                       err = machine__resolve_callchain(machine, al.thread,
                                                         sample->callchain, &parent);
                        if (err)
                                return;
@@ -960,7 +960,7 @@ try_again:
 
                        if (err == ENOENT) {
                                ui__error("The %s event is not supported.\n",
-                                           event_name(counter));
+                                         perf_evsel__name(counter));
                                goto out_err;
                        } else if (err == EMFILE) {
                                ui__error("Too many events are opened.\n"
index d9084e03ce56676628736c5814e27badbe2ef730..6c18785a6417986dabe47f775b0fd07067447012 100644 (file)
@@ -78,6 +78,19 @@ int main(int argc, char *argv[])
         return 0;
 }
 endef
+
+define SOURCE_GTK2_INFOBAR
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(void)
+{
+       gtk_info_bar_new();
+
+       return 0;
+}
+endef
 endif
 
 ifndef NO_LIBPERL
index 34b1c46eaf42c63d36ed5c36cbaceea38c2347a9..67a2703e666a360c1fd71a8af6ea150174ebeb70 100644 (file)
@@ -814,7 +814,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 {
        struct disasm_line *pos, *n;
        struct annotation *notes;
-       const size_t size = symbol__size(sym);
+       size_t size;
        struct map_symbol ms = {
                .map = map,
                .sym = sym,
@@ -834,6 +834,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
        if (sym == NULL)
                return -1;
 
+       size = symbol__size(sym);
+
        if (map->dso->annotate_warned)
                return -1;
 
index 53f6697d014e788396b474c6be957893b8fd09ca..482f0517b61e20f61a18933133717ce53fc0e1c1 100644 (file)
@@ -23,6 +23,7 @@ struct hist_browser {
        struct hists        *hists;
        struct hist_entry   *he_selection;
        struct map_symbol   *selection;
+       int                  print_seq;
        bool                 has_symbols;
 };
 
@@ -800,6 +801,196 @@ do_offset:
        }
 }
 
+static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser,
+                                                       struct callchain_node *chain_node,
+                                                       u64 total, int level,
+                                                       FILE *fp)
+{
+       struct rb_node *node;
+       int offset = level * LEVEL_OFFSET_STEP;
+       u64 new_total, remaining;
+       int printed = 0;
+
+       if (callchain_param.mode == CHAIN_GRAPH_REL)
+               new_total = chain_node->children_hit;
+       else
+               new_total = total;
+
+       remaining = new_total;
+       node = rb_first(&chain_node->rb_root);
+       while (node) {
+               struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+               struct rb_node *next = rb_next(node);
+               u64 cumul = callchain_cumul_hits(child);
+               struct callchain_list *chain;
+               char folded_sign = ' ';
+               int first = true;
+               int extra_offset = 0;
+
+               remaining -= cumul;
+
+               list_for_each_entry(chain, &child->val, list) {
+                       char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+                       const char *str;
+                       bool was_first = first;
+
+                       if (first)
+                               first = false;
+                       else
+                               extra_offset = LEVEL_OFFSET_STEP;
+
+                       folded_sign = callchain_list__folded(chain);
+
+                       alloc_str = NULL;
+                       str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+                       if (was_first) {
+                               double percent = cumul * 100.0 / new_total;
+
+                               if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+                                       str = "Not enough memory!";
+                               else
+                                       str = alloc_str;
+                       }
+
+                       printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str);
+                       free(alloc_str);
+                       if (folded_sign == '+')
+                               break;
+               }
+
+               if (folded_sign == '-') {
+                       const int new_level = level + (extra_offset ? 2 : 1);
+                       printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total,
+                                                                               new_level, fp);
+               }
+
+               node = next;
+       }
+
+       return printed;
+}
+
+static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
+                                               struct callchain_node *node,
+                                               int level, FILE *fp)
+{
+       struct callchain_list *chain;
+       int offset = level * LEVEL_OFFSET_STEP;
+       char folded_sign = ' ';
+       int printed = 0;
+
+       list_for_each_entry(chain, &node->val, list) {
+               char ipstr[BITS_PER_LONG / 4 + 1], *s;
+
+               folded_sign = callchain_list__folded(chain);
+               s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+               printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
+       }
+
+       if (folded_sign == '-')
+               printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node,
+                                                                       browser->hists->stats.total_period,
+                                                                       level + 1,  fp);
+       return printed;
+}
+
+static int hist_browser__fprintf_callchain(struct hist_browser *browser,
+                                          struct rb_root *chain, int level, FILE *fp)
+{
+       struct rb_node *nd;
+       int printed = 0;
+
+       for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+               struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+
+               printed += hist_browser__fprintf_callchain_node(browser, node, level, fp);
+       }
+
+       return printed;
+}
+
+static int hist_browser__fprintf_entry(struct hist_browser *browser,
+                                      struct hist_entry *he, FILE *fp)
+{
+       char s[8192];
+       double percent;
+       int printed = 0;
+       char folded_sign = ' ';
+
+       if (symbol_conf.use_callchain)
+               folded_sign = hist_entry__folded(he);
+
+       hist_entry__snprintf(he, s, sizeof(s), browser->hists);
+       percent = (he->period * 100.0) / browser->hists->stats.total_period;
+
+       if (symbol_conf.use_callchain)
+               printed += fprintf(fp, "%c ", folded_sign);
+
+       printed += fprintf(fp, " %5.2f%%", percent);
+
+       if (symbol_conf.show_nr_samples)
+               printed += fprintf(fp, " %11u", he->nr_events);
+
+       if (symbol_conf.show_total_period)
+               printed += fprintf(fp, " %12" PRIu64, he->period);
+
+       printed += fprintf(fp, "%s\n", rtrim(s));
+
+       if (folded_sign == '-')
+               printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp);
+
+       return printed;
+}
+
+static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
+{
+       struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries));
+       int printed = 0;
+
+       while (nd) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+               printed += hist_browser__fprintf_entry(browser, h, fp);
+               nd = hists__filter_entries(rb_next(nd));
+       }
+
+       return printed;
+}
+
+static int hist_browser__dump(struct hist_browser *browser)
+{
+       char filename[64];
+       FILE *fp;
+
+       while (1) {
+               scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq);
+               if (access(filename, F_OK))
+                       break;
+               /*
+                * XXX: Just an arbitrary lazy upper limit
+                */
+               if (++browser->print_seq == 8192) {
+                       ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
+                       return -1;
+               }
+       }
+
+       fp = fopen(filename, "w");
+       if (fp == NULL) {
+               char bf[64];
+               strerror_r(errno, bf, sizeof(bf));
+               ui_helpline__fpush("Couldn't write to %s: %s", filename, bf);
+               return -1;
+       }
+
+       ++browser->print_seq;
+       hist_browser__fprintf(browser, fp);
+       fclose(fp);
+       ui_helpline__fpush("%s written!", filename);
+
+       return 0;
+}
+
 static struct hist_browser *hist_browser__new(struct hists *hists)
 {
        struct hist_browser *browser = zalloc(sizeof(*browser));
@@ -937,6 +1128,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                            browser->selection->map->dso->annotate_warned)
                                continue;
                        goto do_annotate;
+               case 'P':
+                       hist_browser__dump(browser);
+                       continue;
                case 'd':
                        goto zoom_dso;
                case 't':
@@ -969,6 +1163,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "E             Expand all callchains\n"
                                        "d             Zoom into current DSO\n"
                                        "t             Zoom into current Thread\n"
+                                       "P             Print histograms to perf.hist.N\n"
                                        "/             Filter symbol by name");
                        continue;
                case K_ENTER:
@@ -1172,7 +1367,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
        struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
        bool current_entry = ui_browser__is_current_entry(browser, row);
        unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
-       const char *ev_name = event_name(evsel);
+       const char *ev_name = perf_evsel__name(evsel);
        char bf[256], unit;
        const char *warn = " ";
        size_t printed;
@@ -1240,7 +1435,7 @@ browse_hists:
                         */
                        if (timer)
                                timer(arg);
-                       ev_name = event_name(pos);
+                       ev_name = perf_evsel__name(pos);
                        key = perf_evsel__hists_browse(pos, nr_events, help,
                                                       ev_name, true, timer,
                                                       arg, delay_secs);
@@ -1309,17 +1504,11 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
        ui_helpline__push("Press ESC to exit");
 
        list_for_each_entry(pos, &evlist->entries, node) {
-               const char *ev_name = event_name(pos);
+               const char *ev_name = perf_evsel__name(pos);
                size_t line_len = strlen(ev_name) + 7;
 
                if (menu.b.width < line_len)
                        menu.b.width = line_len;
-               /*
-                * Cache the evsel name, tracepoints have a _high_ cost per
-                * event_name() call.
-                */
-               if (pos->name == NULL)
-                       pos->name = strdup(ev_name);
        }
 
        return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer,
@@ -1330,11 +1519,10 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  void(*timer)(void *arg), void *arg,
                                  int delay_secs)
 {
-
        if (evlist->nr_entries == 1) {
                struct perf_evsel *first = list_entry(evlist->entries.next,
                                                      struct perf_evsel, node);
-               const char *ev_name = event_name(first);
+               const char *ev_name = perf_evsel__name(first);
                return perf_evsel__hists_browse(first, evlist->nr_entries, help,
                                                ev_name, false, timer, arg,
                                                delay_secs);
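
The new 'P' binding dumps the current histogram view to sequentially numbered
files in the working directory: print_seq starts at zero (the browser comes
from zalloc()), so the first press writes perf.hist.0, the next perf.hist.1,
and so on, refusing after 8192 files. For example:

        $ perf report           # press 'P' inside the TUI browser
        $ ls perf.hist.*
        perf.hist.0
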
index 0656c381a89cae7dcfd92302ea2c021ca75b517c..ec12e0b4ded6567d1c673049cf39cea0f93891f0 100644 (file)
@@ -11,8 +11,8 @@
 
 static void perf_gtk__signal(int sig)
 {
+       perf_gtk__exit(false);
        psignal(sig, "perf");
-       gtk_main_quit();
 }
 
 static void perf_gtk__resize_window(GtkWidget *window)
@@ -122,13 +122,59 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
        gtk_container_add(GTK_CONTAINER(window), view);
 }
 
+#ifdef HAVE_GTK_INFO_BAR
+static GtkWidget *perf_gtk__setup_info_bar(void)
+{
+       GtkWidget *info_bar;
+       GtkWidget *label;
+       GtkWidget *content_area;
+
+       info_bar = gtk_info_bar_new();
+       gtk_widget_set_no_show_all(info_bar, TRUE);
+
+       label = gtk_label_new("");
+       gtk_widget_show(label);
+
+       content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar));
+       gtk_container_add(GTK_CONTAINER(content_area), label);
+
+       gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK,
+                               GTK_RESPONSE_OK);
+       g_signal_connect(info_bar, "response",
+                        G_CALLBACK(gtk_widget_hide), NULL);
+
+       pgctx->info_bar = info_bar;
+       pgctx->message_label = label;
+
+       return info_bar;
+}
+#endif
+
+static GtkWidget *perf_gtk__setup_statusbar(void)
+{
+       GtkWidget *stbar;
+       unsigned ctxid;
+
+       stbar = gtk_statusbar_new();
+
+       ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar),
+                                            "perf report");
+       pgctx->statbar = stbar;
+       pgctx->statbar_ctx_id = ctxid;
+
+       return stbar;
+}
+
 int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
                                  const char *help __used,
                                  void (*timer) (void *arg)__used,
                                  void *arg __used, int delay_secs __used)
 {
        struct perf_evsel *pos;
+       GtkWidget *vbox;
        GtkWidget *notebook;
+       GtkWidget *info_bar;
+       GtkWidget *statbar;
        GtkWidget *window;
 
        signal(SIGSEGV, perf_gtk__signal);
@@ -143,11 +189,17 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
 
        g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
 
+       pgctx = perf_gtk__activate_context(window);
+       if (!pgctx)
+               return -1;
+
+       vbox = gtk_vbox_new(FALSE, 0);
+
        notebook = gtk_notebook_new();
 
        list_for_each_entry(pos, &evlist->entries, node) {
                struct hists *hists = &pos->hists;
-               const char *evname = event_name(pos);
+               const char *evname = perf_evsel__name(pos);
                GtkWidget *scrolled_window;
                GtkWidget *tab_label;
 
@@ -164,7 +216,16 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
                gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
        }
 
-       gtk_container_add(GTK_CONTAINER(window), notebook);
+       gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
+
+       info_bar = perf_gtk__setup_info_bar();
+       if (info_bar)
+               gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
+
+       statbar = perf_gtk__setup_statusbar();
+       gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
+
+       gtk_container_add(GTK_CONTAINER(window), vbox);
 
        gtk_widget_show_all(window);
 
@@ -174,5 +235,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
 
        gtk_main();
 
+       perf_gtk__deactivate_context(&pgctx);
+
        return 0;
 }
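
Packing everything into a vbox is what makes room for the info bar and status
bar: the third and fourth gtk_box_pack_start() arguments (expand, fill) decide
which child absorbs extra space. The same calls as above, annotated:

        vbox = gtk_vbox_new(FALSE, 0);  /* non-homogeneous, no extra spacing */

        /* the notebook grows and shrinks with the window ... */
        gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
        /* ... while the bars keep their natural height */
        gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
        gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);

        gtk_container_add(GTK_CONTAINER(window), vbox);
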
index 75177ee04032d06a3862acf4befd82b0b67516d3..a4d0f2b4a2dcf4c4b6bfb9b21a52aabc4b9376ca 100644 (file)
@@ -1,8 +1,39 @@
 #ifndef _PERF_GTK_H_
 #define _PERF_GTK_H_ 1
 
+#include <stdbool.h>
+
 #pragma GCC diagnostic ignored "-Wstrict-prototypes"
 #include <gtk/gtk.h>
 #pragma GCC diagnostic error "-Wstrict-prototypes"
 
+
+struct perf_gtk_context {
+       GtkWidget *main_window;
+
+#ifdef HAVE_GTK_INFO_BAR
+       GtkWidget *info_bar;
+       GtkWidget *message_label;
+#endif
+       GtkWidget *statbar;
+       guint statbar_ctx_id;
+};
+
+extern struct perf_gtk_context *pgctx;
+
+static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
+{
+       return ctx && ctx->main_window;
+}
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
+
+#ifndef HAVE_GTK_INFO_BAR
+static inline GtkWidget *perf_gtk__setup_info_bar(void)
+{
+       return NULL;
+}
+#endif
+
 #endif /* _PERF_GTK_H_ */
index 8295299577660b24b99405a22965316f04f96e98..92879ce61e2fc0059f6827c38b9ff8eaf114adba 100644 (file)
@@ -1,12 +1,17 @@
 #include "gtk.h"
 #include "../../util/cache.h"
+#include "../../util/debug.h"
+
+extern struct perf_error_ops perf_gtk_eops;
 
 int perf_gtk__init(void)
 {
+       perf_error__register(&perf_gtk_eops);
        return gtk_init_check(NULL, NULL) ? 0 : -1;
 }
 
 void perf_gtk__exit(bool wait_for_ok __used)
 {
+       perf_error__unregister(&perf_gtk_eops);
        gtk_main_quit();
 }
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
new file mode 100644 (file)
index 0000000..0ead373
--- /dev/null
@@ -0,0 +1,129 @@
+#include "../util.h"
+#include "../../util/debug.h"
+#include "gtk.h"
+
+#include <string.h>
+
+
+struct perf_gtk_context *pgctx;
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window)
+{
+       struct perf_gtk_context *ctx;
+
+       ctx = malloc(sizeof(*ctx));
+       if (ctx)
+               ctx->main_window = window;
+
+       return ctx;
+}
+
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
+{
+       if (!perf_gtk__is_active_context(*ctx))
+               return -1;
+
+       free(*ctx);
+       *ctx = NULL;
+       return 0;
+}
+
+static int perf_gtk__error(const char *format, va_list args)
+{
+       char *msg;
+       GtkWidget *dialog;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Error:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window),
+                                       GTK_DIALOG_DESTROY_WITH_PARENT,
+                                       GTK_MESSAGE_ERROR,
+                                       GTK_BUTTONS_CLOSE,
+                                       "<b>Error</b>\n\n%s", msg);
+       gtk_dialog_run(GTK_DIALOG(dialog));
+
+       gtk_widget_destroy(dialog);
+       free(msg);
+       return 0;
+}
+
+#ifdef HAVE_GTK_INFO_BAR
+static int perf_gtk__warning_info_bar(const char *format, va_list args)
+{
+       char *msg;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Warning:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg);
+       gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar),
+                                     GTK_MESSAGE_WARNING);
+       gtk_widget_show(pgctx->info_bar);
+
+       free(msg);
+       return 0;
+}
+#else
+static int perf_gtk__warning_statusbar(const char *format, va_list args)
+{
+       char *msg, *p;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Warning:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
+                         pgctx->statbar_ctx_id);
+
+       /* Only the first line can be displayed */
+       p = strchr(msg, '\n');
+       if (p)
+               *p = '\0';
+
+       gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
+                          pgctx->statbar_ctx_id, msg);
+
+       free(msg);
+       return 0;
+}
+#endif
+
+struct perf_error_ops perf_gtk_eops = {
+       .error          = perf_gtk__error,
+#ifdef HAVE_GTK_INFO_BAR
+       .warning        = perf_gtk__warning_info_bar,
+#else
+       .warning        = perf_gtk__warning_statusbar,
+#endif
+};
+
+/*
+ * FIXME: Functions below should be implemented properly.
+ *        For now, just add stubs for NO_NEWT=1 build.
+ */
+#ifdef NO_NEWT_SUPPORT
+int ui_helpline__show_help(const char *format __used, va_list ap __used)
+{
+       return 0;
+}
+
+void ui_progress__update(u64 curr __used, u64 total __used,
+                        const char *title __used)
+{
+}
+#endif
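
perf_gtk_eops is picked up by the perf_error__register() call added to
ui/gtk/setup.c above; the dispatch side lives in util/debug.c and is outside
this diff, but presumably reduces to forwarding ui__error()/ui__warning()
through whichever table was registered last. A sketch of that assumption (the
global name and function body are invented for illustration):

        #include <stdarg.h>

        static struct perf_error_ops *perf_eops; /* assumed: set by perf_error__register() */

        int ui__error(const char *format, ...)
        {
                va_list args;
                int ret;

                va_start(args, format);
                ret = perf_eops->error(format, args);   /* e.g. perf_gtk__error() */
                va_end(args);
                return ret;
        }
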
index d33e943ac434608067fe54fb734cda5a02d1d991..e813c1d173463564a496bdd3917f3079089a3101 100644 (file)
@@ -15,6 +15,8 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 
 static volatile int ui__need_resize;
 
+extern struct perf_error_ops perf_tui_eops;
+
 void ui__refresh_dimensions(bool force)
 {
        if (force || ui__need_resize) {
@@ -122,6 +124,8 @@ int ui__init(void)
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
+
+       perf_error__register(&perf_tui_eops);
 out:
        return err;
 }
@@ -137,4 +141,6 @@ void ui__exit(bool wait_for_ok)
        SLsmg_refresh();
        SLsmg_reset_smg();
        SLang_reset_tty();
+
+       perf_error__unregister(&perf_tui_eops);
 }
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
new file mode 100644 (file)
index 0000000..092902e
--- /dev/null
@@ -0,0 +1,243 @@
+#include "../../util/util.h"
+#include <signal.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/ttydefaults.h>
+
+#include "../../util/cache.h"
+#include "../../util/debug.h"
+#include "../browser.h"
+#include "../keysyms.h"
+#include "../helpline.h"
+#include "../ui.h"
+#include "../util.h"
+#include "../libslang.h"
+
+static void ui_browser__argv_write(struct ui_browser *browser,
+                                  void *entry, int row)
+{
+       char **arg = entry;
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+
+       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+                                                      HE_COLORSET_NORMAL);
+       slsmg_write_nstring(*arg, browser->width);
+}
+
+static int popup_menu__run(struct ui_browser *menu)
+{
+       int key;
+
+       if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
+               return -1;
+
+       while (1) {
+               key = ui_browser__run(menu, 0);
+
+               switch (key) {
+               case K_RIGHT:
+               case K_ENTER:
+                       key = menu->index;
+                       break;
+               case K_LEFT:
+               case K_ESC:
+               case 'q':
+               case CTRL('c'):
+                       key = -1;
+                       break;
+               default:
+                       continue;
+               }
+
+               break;
+       }
+
+       ui_browser__hide(menu);
+       return key;
+}
+
+int ui__popup_menu(int argc, char * const argv[])
+{
+       struct ui_browser menu = {
+               .entries    = (void *)argv,
+               .refresh    = ui_browser__argv_refresh,
+               .seek       = ui_browser__argv_seek,
+               .write      = ui_browser__argv_write,
+               .nr_entries = argc,
+       };
+
+       return popup_menu__run(&menu);
+}
+
+int ui_browser__input_window(const char *title, const char *text, char *input,
+                            const char *exit_msg, int delay_secs)
+{
+       int x, y, len, key;
+       int max_len = 60, nr_lines = 0;
+       static char buf[50];
+       const char *t;
+
+       t = text;
+       while (1) {
+               const char *sep = strchr(t, '\n');
+
+               if (sep == NULL)
+                       sep = strchr(t, '\0');
+               len = sep - t;
+               if (max_len < len)
+                       max_len = len;
+               ++nr_lines;
+               if (*sep == '\0')
+                       break;
+               t = sep + 1;
+       }
+
+       max_len += 2;
+       nr_lines += 8;
+       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+       x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+       SLsmg_set_color(0);
+       SLsmg_draw_box(y, x++, nr_lines, max_len);
+       if (title) {
+               SLsmg_gotorc(y, x + 1);
+               SLsmg_write_string((char *)title);
+       }
+       SLsmg_gotorc(++y, x);
+       nr_lines -= 7;
+       max_len -= 2;
+       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+                                  nr_lines, max_len, 1);
+       y += nr_lines;
+       len = 5;
+       while (len--) {
+               SLsmg_gotorc(y + len - 1, x);
+               SLsmg_write_nstring((char *)" ", max_len);
+       }
+       SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+       SLsmg_gotorc(y + 3, x);
+       SLsmg_write_nstring((char *)exit_msg, max_len);
+       SLsmg_refresh();
+
+       x += 2;
+       len = 0;
+       key = ui__getch(delay_secs);
+       while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+               if (key == K_BKSPC) {
+                       if (len == 0)
+                               goto next_key;
+                       SLsmg_gotorc(y, x + --len);
+                       SLsmg_write_char(' ');
+               } else {
+                       buf[len] = key;
+                       SLsmg_gotorc(y, x + len++);
+                       SLsmg_write_char(key);
+               }
+               SLsmg_refresh();
+
+               /* XXX more graceful overflow handling needed */
+               if (len == sizeof(buf) - 1) {
+                       ui_helpline__push("maximum size of symbol name reached!");
+                       key = K_ENTER;
+                       break;
+               }
+next_key:
+               key = ui__getch(delay_secs);
+       }
+
+       buf[len] = '\0';
+       strncpy(input, buf, len+1);
+       return key;
+}
+
+int ui__question_window(const char *title, const char *text,
+                       const char *exit_msg, int delay_secs)
+{
+       int x, y;
+       int max_len = 0, nr_lines = 0;
+       const char *t;
+
+       t = text;
+       while (1) {
+               const char *sep = strchr(t, '\n');
+               int len;
+
+               if (sep == NULL)
+                       sep = strchr(t, '\0');
+               len = sep - t;
+               if (max_len < len)
+                       max_len = len;
+               ++nr_lines;
+               if (*sep == '\0')
+                       break;
+               t = sep + 1;
+       }
+
+       max_len += 2;
+       nr_lines += 4;
+       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+       x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+       SLsmg_set_color(0);
+       SLsmg_draw_box(y, x++, nr_lines, max_len);
+       if (title) {
+               SLsmg_gotorc(y, x + 1);
+               SLsmg_write_string((char *)title);
+       }
+       SLsmg_gotorc(++y, x);
+       nr_lines -= 2;
+       max_len -= 2;
+       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+                                  nr_lines, max_len, 1);
+       SLsmg_gotorc(y + nr_lines - 2, x);
+       SLsmg_write_nstring((char *)" ", max_len);
+       SLsmg_gotorc(y + nr_lines - 1, x);
+       SLsmg_write_nstring((char *)exit_msg, max_len);
+       SLsmg_refresh();
+       return ui__getch(delay_secs);
+}
+
+int ui__help_window(const char *text)
+{
+       return ui__question_window("Help", text, "Press any key...", 0);
+}
+
+int ui__dialog_yesno(const char *msg)
+{
+       return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
+}
+
+static int __ui__warning(const char *title, const char *format, va_list args)
+{
+       char *s;
+
+       if (vasprintf(&s, format, args) > 0) {
+               int key;
+
+               pthread_mutex_lock(&ui__lock);
+               key = ui__question_window(title, s, "Press any key...", 0);
+               pthread_mutex_unlock(&ui__lock);
+               free(s);
+               return key;
+       }
+
+       fprintf(stderr, "%s\n", title);
+       vfprintf(stderr, format, args);
+       return K_ESC;
+}
+
+static int perf_tui__error(const char *format, va_list args)
+{
+       return __ui__warning("Error:", format, args);
+}
+
+static int perf_tui__warning(const char *format, va_list args)
+{
+       return __ui__warning("Warning:", format, args);
+}
+
+struct perf_error_ops perf_tui_eops = {
+       .error          = perf_tui__error,
+       .warning        = perf_tui__warning,
+};
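(For illustration only, not part of the patch: a hedged sketch of how a caller might drive the TUI helpers added above. It assumes ui__init() has already set up SLang; prompt_for_symbol() and its strings are hypothetical.)

/* Hypothetical caller: prompt for a symbol name, then confirm.
 * "name_out" must have room for the 50-byte buffer used by
 * ui_browser__input_window() above. */
static int prompt_for_symbol(char *name_out)
{
	int key = ui_browser__input_window("Annotate", "Symbol name:",
					   name_out,
					   "ENTER: ok, ESC: cancel", 0);
	if (key != K_ENTER)
		return -1;

	/* ui__dialog_yesno() returns the key pressed; ENTER means yes. */
	return ui__dialog_yesno("Annotate this symbol?") == K_ENTER ? 0 : -1;
}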
index ad4374a16bb08b88c008ca6e79fcf4a5b15903b5..4f989774c8c674c535990dcf82ae2ba47d5971e3 100644 (file)
-#include "../util.h"
-#include <signal.h>
-#include <stdbool.h>
-#include <string.h>
-#include <sys/ttydefaults.h>
-
-#include "../cache.h"
-#include "../debug.h"
-#include "browser.h"
-#include "keysyms.h"
-#include "helpline.h"
-#include "ui.h"
 #include "util.h"
-#include "libslang.h"
-
-static void ui_browser__argv_write(struct ui_browser *browser,
-                                  void *entry, int row)
-{
-       char **arg = entry;
-       bool current_entry = ui_browser__is_current_entry(browser, row);
-
-       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
-                                                      HE_COLORSET_NORMAL);
-       slsmg_write_nstring(*arg, browser->width);
-}
-
-static int popup_menu__run(struct ui_browser *menu)
-{
-       int key;
-
-       if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
-               return -1;
+#include "../debug.h"
 
-       while (1) {
-               key = ui_browser__run(menu, 0);
-
-               switch (key) {
-               case K_RIGHT:
-               case K_ENTER:
-                       key = menu->index;
-                       break;
-               case K_LEFT:
-               case K_ESC:
-               case 'q':
-               case CTRL('c'):
-                       key = -1;
-                       break;
-               default:
-                       continue;
-               }
-
-               break;
-       }
-
-       ui_browser__hide(menu);
-       return key;
-}
 
-int ui__popup_menu(int argc, char * const argv[])
+/*
+ * Default error logging functions
+ */
+static int perf_stdio__error(const char *format, va_list args)
 {
-       struct ui_browser menu = {
-               .entries    = (void *)argv,
-               .refresh    = ui_browser__argv_refresh,
-               .seek       = ui_browser__argv_seek,
-               .write      = ui_browser__argv_write,
-               .nr_entries = argc,
-       };
-
-       return popup_menu__run(&menu);
+       fprintf(stderr, "Error:\n");
+       vfprintf(stderr, format, args);
+       return 0;
 }
 
-int ui_browser__input_window(const char *title, const char *text, char *input,
-                            const char *exit_msg, int delay_secs)
+static int perf_stdio__warning(const char *format, va_list args)
 {
-       int x, y, len, key;
-       int max_len = 60, nr_lines = 0;
-       static char buf[50];
-       const char *t;
-
-       t = text;
-       while (1) {
-               const char *sep = strchr(t, '\n');
-
-               if (sep == NULL)
-                       sep = strchr(t, '\0');
-               len = sep - t;
-               if (max_len < len)
-                       max_len = len;
-               ++nr_lines;
-               if (*sep == '\0')
-                       break;
-               t = sep + 1;
-       }
-
-       max_len += 2;
-       nr_lines += 8;
-       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
-       x = SLtt_Screen_Cols / 2 - max_len / 2;
-
-       SLsmg_set_color(0);
-       SLsmg_draw_box(y, x++, nr_lines, max_len);
-       if (title) {
-               SLsmg_gotorc(y, x + 1);
-               SLsmg_write_string((char *)title);
-       }
-       SLsmg_gotorc(++y, x);
-       nr_lines -= 7;
-       max_len -= 2;
-       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
-                                  nr_lines, max_len, 1);
-       y += nr_lines;
-       len = 5;
-       while (len--) {
-               SLsmg_gotorc(y + len - 1, x);
-               SLsmg_write_nstring((char *)" ", max_len);
-       }
-       SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
-
-       SLsmg_gotorc(y + 3, x);
-       SLsmg_write_nstring((char *)exit_msg, max_len);
-       SLsmg_refresh();
-
-       x += 2;
-       len = 0;
-       key = ui__getch(delay_secs);
-       while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
-               if (key == K_BKSPC) {
-                       if (len == 0)
-                               goto next_key;
-                       SLsmg_gotorc(y, x + --len);
-                       SLsmg_write_char(' ');
-               } else {
-                       buf[len] = key;
-                       SLsmg_gotorc(y, x + len++);
-                       SLsmg_write_char(key);
-               }
-               SLsmg_refresh();
-
-               /* XXX more graceful overflow handling needed */
-               if (len == sizeof(buf) - 1) {
-                       ui_helpline__push("maximum size of symbol name reached!");
-                       key = K_ENTER;
-                       break;
-               }
-next_key:
-               key = ui__getch(delay_secs);
-       }
-
-       buf[len] = '\0';
-       strncpy(input, buf, len+1);
-       return key;
+       fprintf(stderr, "Warning:\n");
+       vfprintf(stderr, format, args);
+       return 0;
 }
 
-int ui__question_window(const char *title, const char *text,
-                       const char *exit_msg, int delay_secs)
+static struct perf_error_ops default_eops =
 {
-       int x, y;
-       int max_len = 0, nr_lines = 0;
-       const char *t;
-
-       t = text;
-       while (1) {
-               const char *sep = strchr(t, '\n');
-               int len;
-
-               if (sep == NULL)
-                       sep = strchr(t, '\0');
-               len = sep - t;
-               if (max_len < len)
-                       max_len = len;
-               ++nr_lines;
-               if (*sep == '\0')
-                       break;
-               t = sep + 1;
-       }
-
-       max_len += 2;
-       nr_lines += 4;
-       y = SLtt_Screen_Rows / 2 - nr_lines / 2,
-       x = SLtt_Screen_Cols / 2 - max_len / 2;
-
-       SLsmg_set_color(0);
-       SLsmg_draw_box(y, x++, nr_lines, max_len);
-       if (title) {
-               SLsmg_gotorc(y, x + 1);
-               SLsmg_write_string((char *)title);
-       }
-       SLsmg_gotorc(++y, x);
-       nr_lines -= 2;
-       max_len -= 2;
-       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
-                                  nr_lines, max_len, 1);
-       SLsmg_gotorc(y + nr_lines - 2, x);
-       SLsmg_write_nstring((char *)" ", max_len);
-       SLsmg_gotorc(y + nr_lines - 1, x);
-       SLsmg_write_nstring((char *)exit_msg, max_len);
-       SLsmg_refresh();
-       return ui__getch(delay_secs);
-}
+       .error          = perf_stdio__error,
+       .warning        = perf_stdio__warning,
+};
 
-int ui__help_window(const char *text)
-{
-       return ui__question_window("Help", text, "Press any key...", 0);
-}
+static struct perf_error_ops *perf_eops = &default_eops;
 
-int ui__dialog_yesno(const char *msg)
-{
-       return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
-}
 
-int __ui__warning(const char *title, const char *format, va_list args)
+int ui__error(const char *format, ...)
 {
-       char *s;
-
-       if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
-               int key;
+       int ret;
+       va_list args;
 
-               pthread_mutex_lock(&ui__lock);
-               key = ui__question_window(title, s, "Press any key...", 0);
-               pthread_mutex_unlock(&ui__lock);
-               free(s);
-               return key;
-       }
+       va_start(args, format);
+       ret = perf_eops->error(format, args);
+       va_end(args);
 
-       fprintf(stderr, "%s:\n", title);
-       vfprintf(stderr, format, args);
-       return K_ESC;
+       return ret;
 }
 
 int ui__warning(const char *format, ...)
 {
-       int key;
+       int ret;
        va_list args;
 
        va_start(args, format);
-       key = __ui__warning("Warning", format, args);
+       ret = perf_eops->warning(format, args);
        va_end(args);
-       return key;
+
+       return ret;
 }
 
-int ui__error(const char *format, ...)
+
+/**
+ * perf_error__register - Register error logging functions
+ * @eops: The pointer to error logging function struct
+ *
+ * Register UI-specific error logging functions. Registration fails
+ * if a non-default set of functions is already registered; unregister
+ * it first.
+ */
+int perf_error__register(struct perf_error_ops *eops)
 {
-       int key;
-       va_list args;
+       if (perf_eops != &default_eops)
+               return -1;
 
-       va_start(args, format);
-       key = __ui__warning("Error", format, args);
-       va_end(args);
-       return key;
+       perf_eops = eops;
+       return 0;
+}
+
+/**
+ * perf_error__unregister - Unregister error logging functions
+ * @eops: The pointer to error logging function struct
+ *
+ * Unregister already registered error logging functions.
+ */
+int perf_error__unregister(struct perf_error_ops *eops)
+{
+       if (perf_eops != eops)
+               return -1;
+
+       perf_eops = &default_eops;
+       return 0;
 }
index 2d1738bd71c8adc639e7e53919f2cf988ea07ec3..361f08c52d37cba6acf8056cba96f297ceb25515 100644 (file)
@@ -9,6 +9,13 @@ int ui__help_window(const char *text);
 int ui__dialog_yesno(const char *msg);
 int ui__question_window(const char *title, const char *text,
                        const char *exit_msg, int delay_secs);
-int __ui__warning(const char *title, const char *format, va_list args);
+
+struct perf_error_ops {
+       int (*error)(const char *format, va_list args);
+       int (*warning)(const char *format, va_list args);
+};
+
+int perf_error__register(struct perf_error_ops *eops);
+int perf_error__unregister(struct perf_error_ops *eops);
 
 #endif /* _PERF_UI_UTIL_H_ */
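(A hedged sketch, not part of the patch, of how another UI backend could plug into the perf_error_ops interface declared above; all my_backend_* names are illustrative. Registration returns -1 unless the stdio defaults are currently in place, and unregistering restores them.)

#include <stdarg.h>
#include <stdio.h>

static int my_backend__error(const char *format, va_list args)
{
	/* Route to the backend's own sink; stderr stands in here. */
	fprintf(stderr, "my-ui error: ");
	return vfprintf(stderr, format, args);
}

static int my_backend__warning(const char *format, va_list args)
{
	fprintf(stderr, "my-ui warning: ");
	return vfprintf(stderr, format, args);
}

static struct perf_error_ops my_backend_eops = {
	.error	 = my_backend__error,
	.warning = my_backend__warning,
};

/* Mirror the ui__init()/ui__exit() pairing used by the TUI code above: */
void my_backend__init(void) { perf_error__register(&my_backend_eops); }
void my_backend__exit(void) { perf_error__unregister(&my_backend_eops); }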
index efb1fce259a4f2ea9ae202a9111976daf497bb53..4dfe0bb3c3225455e4f0ff3b1cb74a386352db3c 100644 (file)
@@ -47,7 +47,7 @@ int dump_printf(const char *fmt, ...)
        return ret;
 }
 
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
 int ui__warning(const char *format, ...)
 {
        va_list args;
index 6bebe7f0a20c6cb6b12cac9d041203d3221f2b1a..015c91dbc096310554ba6f7bcec51c16d7cabfac 100644 (file)
@@ -12,8 +12,9 @@ int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
 
 struct ui_progress;
+struct perf_error_ops;
 
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
 static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
 {
        return 0;
@@ -23,12 +24,28 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used,
                                       const char *title __used) {}
 
 #define ui__error(format, arg...) ui__warning(format, ##arg)
-#else
+
+static inline int
+perf_error__register(struct perf_error_ops *eops __used)
+{
+       return 0;
+}
+
+static inline int
+perf_error__unregister(struct perf_error_ops *eops __used)
+{
+       return 0;
+}
+
+#else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
+
 extern char ui_helpline__last_msg[];
 int ui_helpline__show_help(const char *format, va_list ap);
 #include "../ui/progress.h"
 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
-#endif
+#include "../ui/util.h"
+
+#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
 
 int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
 int ui__error_paranoid(void);
index 7400fb3fc50c91910a51eb3dc6a679ca6e4a05de..f74e9560350eb3dccfe7cc47c6a13e6256f91bb4 100644 (file)
@@ -224,8 +224,8 @@ out_free_attrs:
        return err;
 }
 
-static struct perf_evsel *
-       perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 {
        struct perf_evsel *evsel;
 
index 989bee9624c23c66e002b7a245c89c7114f3b219..40d4d3cdced0e081ec41863abe6ab46c562a3e3a 100644 (file)
@@ -73,6 +73,9 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
 #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
        perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
 
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+
 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id);
 
index 9f6cebd798eed5ee44ea4510305b93b4f2e3fd6b..3d1f6968f1751e3f8d28d12a636a79d0793b0ea5 100644 (file)
@@ -15,7 +15,6 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
-#include "../../include/linux/perf_event.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -78,7 +77,7 @@ static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "ref-cycles",
 };
 
-const char *__perf_evsel__hw_name(u64 config)
+static const char *__perf_evsel__hw_name(u64 config)
 {
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];
@@ -86,16 +85,15 @@ const char *__perf_evsel__hw_name(u64 config)
        return "unknown-hardware";
 }
 
-static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
 {
-       int colon = 0;
+       int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
-       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
        bool exclude_guest_default = false;
 
 #define MOD_PRINT(context, mod)        do {                                    \
                if (!attr->exclude_##context) {                         \
-                       if (!colon) colon = r++;                        \
+                       if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)
 
@@ -108,7 +106,7 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
 
        if (attr->precise_ip) {
                if (!colon)
-                       colon = r++;
+                       colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }
@@ -119,39 +117,182 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
        }
 #undef MOD_PRINT
        if (colon)
-               bf[colon] = ':';
+               bf[colon - 1] = ':';
        return r;
 }
 
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+       return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+       "cpu-clock",
+       "task-clock",
+       "page-faults",
+       "context-switches",
+       "CPU-migrations",
+       "minor-faults",
+       "major-faults",
+       "alignment-faults",
+       "emulation-faults",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
+{
+       if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+               return perf_evsel__sw_names[config];
+       return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+       return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache",        "l1-d",         "l1d",          "L1-data",              },
+ { "L1-icache",        "l1-i",         "l1i",          "L1-instruction",       },
+ { "LLC",      "L2",                                                   },
+ { "dTLB",     "d-tlb",        "Data-TLB",                             },
+ { "iTLB",     "i-tlb",        "Instruction-TLB",                      },
+ { "branch",   "branches",     "bpu",          "btb",          "bpc",  },
+ { "node",                                                             },
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+                                  [PERF_EVSEL__MAX_ALIASES] = {
+ { "load",     "loads",        "read",                                 },
+ { "store",    "stores",       "write",                                },
+ { "prefetch", "prefetches",   "speculative-read", "speculative-load", },
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+                                      [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs",     "Reference",    "ops",          "access",               },
+ { "misses",   "miss",                                                 },
+};
+
+#define C(x)           PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ     (1 << C(OP_READ))
+#define CACHE_WRITE    (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x)         (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)]      = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)]       = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)]     = (CACHE_READ),
+ [C(BPU)]      = (CACHE_READ),
+ [C(NODE)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
+{
+       if (perf_evsel__hw_cache_stat[type] & COP(op))
+               return true;    /* valid */
+       else
+               return false;   /* invalid */
+}
+
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+                                           char *bf, size_t size)
+{
+       if (result) {
+               return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
+                                perf_evsel__hw_cache_op[op][0],
+                                perf_evsel__hw_cache_result[result][0]);
+       }
+
+       return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
+                        perf_evsel__hw_cache_op[op][1]);
+}
+
+static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
 {
-       int ret;
+       u8 op, result, type = (config >>  0) & 0xff;
+       const char *err = "unknown-ext-hardware-cache-type";
+
+       if (type >= PERF_COUNT_HW_CACHE_MAX)
+               goto out_err;
+
+       op = (config >>  8) & 0xff;
+       err = "unknown-ext-hardware-cache-op";
+       if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
+               goto out_err;
+
+       result = (config >> 16) & 0xff;
+       err = "unknown-ext-hardware-cache-result";
+       if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               goto out_err;
+
+       err = "invalid-cache";
+       if (!perf_evsel__is_cache_op_valid(type, op))
+               goto out_err;
+
+       return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
+out_err:
+       return scnprintf(bf, size, "%s", err);
+}
+
+static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+       return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+       return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+const char *perf_evsel__name(struct perf_evsel *evsel)
+{
+       char bf[128];
+
+       if (evsel->name)
+               return evsel->name;
 
        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
-               ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+               perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;
 
        case PERF_TYPE_HARDWARE:
-               ret = perf_evsel__hw_name(evsel, bf, size);
+               perf_evsel__hw_name(evsel, bf, sizeof(bf));
+               break;
+
+       case PERF_TYPE_HW_CACHE:
+               perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
+               break;
+
+       case PERF_TYPE_SOFTWARE:
+               perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;
+
+       case PERF_TYPE_TRACEPOINT:
+               scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+               break;
+
        default:
-               /*
-                * FIXME
-                *
-                * This is the minimal perf_evsel__name so that we can
-                * reconstruct event names taking into account event modifiers.
-                *
-                * The old event_name uses it now for raw anr hw events, so that
-                * we don't drag all the parsing stuff into the python binding.
-                *
-                * On the next devel cycle the rest of the event naming will be
-                * brought here.
-                */
-               return 0;
-       }
-
-       return ret;
+               scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
+               break;
+       }
+
+       evsel->name = strdup(bf);
+
+       return evsel->name ?: "unknown";
 }
 
 void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
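(To spell out the encoding __perf_evsel__hw_cache_name() decodes above: a PERF_TYPE_HW_CACHE config packs the cache type in bits 0-7, the op in bits 8-15 and the result in bits 16-23. A hedged sketch; build_hw_cache_config() is an illustrative helper, not part of the patch.)

#include <linux/perf_event.h>

/* Pack a hw-cache config the same way the code above unpacks it. */
static __u64 build_hw_cache_config(__u8 type, __u8 op, __u8 result)
{
	return (__u64)type | ((__u64)op << 8) | ((__u64)result << 16);
}

/*
 * For example, "L1-dcache-load-misses" corresponds to:
 *	build_hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *			      PERF_COUNT_HW_CACHE_OP_READ,
 *			      PERF_COUNT_HW_CACHE_RESULT_MISS);
 */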
index 4ba8b564e6f47f039652ebac739d9d899d5881f5..67cc5033d19234e28071523f51ecf7c7186822b1 100644 (file)
@@ -83,8 +83,19 @@ void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts,
                        struct perf_evsel *first);
 
-const char* __perf_evsel__hw_name(u64 config);
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+
+#define PERF_EVSEL__MAX_ALIASES 8
+
+extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+                                      [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+                                      [PERF_EVSEL__MAX_ALIASES];
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+                                           char *bf, size_t size);
+const char *perf_evsel__name(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
index e909d43cf5422333e4d47d2b412c1f3fa0b636af..5a47aba46759274f7f542e533bae4a62749d565a 100644 (file)
@@ -641,7 +641,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, event_name(attr));
+               ret = do_write_string(fd, perf_evsel__name(attr));
                if (ret < 0)
                        return ret;
                /*
@@ -1474,15 +1474,15 @@ out:
 
 static int process_tracing_data(struct perf_file_section *section __unused,
                              struct perf_header *ph __unused,
-                             int feat __unused, int fd)
+                             int feat __unused, int fd, void *data)
 {
-       trace_report(fd, false);
+       trace_report(fd, data, false);
        return 0;
 }
 
 static int process_build_id(struct perf_file_section *section,
                            struct perf_header *ph,
-                           int feat __unused, int fd)
+                           int feat __unused, int fd, void *data __used)
 {
        if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
                pr_debug("Failed to read buildids, continuing...\n");
@@ -1493,7 +1493,7 @@ struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);
        int (*process)(struct perf_file_section *section,
-                      struct perf_header *h, int feat, int fd);
+                      struct perf_header *h, int feat, int fd, void *data);
        const char *name;
        bool full_only;
 };
@@ -1988,7 +1988,7 @@ int perf_file_header__read(struct perf_file_header *header,
 
 static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
-                                     int feat, int fd, void *data __used)
+                                     int feat, int fd, void *data)
 {
        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
@@ -2004,7 +2004,7 @@ static int perf_file_section__process(struct perf_file_section *section,
        if (!feat_ops[feat].process)
                return 0;
 
-       return feat_ops[feat].process(section, ph, feat, fd);
+       return feat_ops[feat].process(section, ph, feat, fd, data);
 }
 
 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
@@ -2093,9 +2093,11 @@ static int read_attr(int fd, struct perf_header *ph,
        return ret <= 0 ? -1 : 0;
 }
 
-static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel,
+                                          struct pevent *pevent)
 {
-       struct event_format *event = trace_find_event(evsel->attr.config);
+       struct event_format *event = pevent_find_event(pevent,
+                                                      evsel->attr.config);
        char bf[128];
 
        if (event == NULL)
@@ -2109,13 +2111,14 @@ static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
        return 0;
 }
 
-static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist,
+                                            struct pevent *pevent)
 {
        struct perf_evsel *pos;
 
        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
-                   perf_evsel__set_tracepoint_name(pos))
+                   perf_evsel__set_tracepoint_name(pos, pevent))
                        return -1;
        }
 
@@ -2198,12 +2201,12 @@ int perf_session__read_header(struct perf_session *session, int fd)
                event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
        }
 
-       perf_header__process_sections(header, fd, NULL,
+       perf_header__process_sections(header, fd, &session->pevent,
                                      perf_file_section__process);
 
        lseek(fd, header->data_offset, SEEK_SET);
 
-       if (perf_evlist__set_tracepoint_names(session->evlist))
+       if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent))
                goto out_delete_evlist;
 
        header->frozen = 1;
@@ -2419,8 +2422,8 @@ int perf_event__process_tracing_data(union perf_event *event,
        lseek(session->fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);
 
-       size_read = trace_report(session->fd, session->repipe);
-
+       size_read = trace_report(session->fd, &session->pevent,
+                                session->repipe);
        padding = ALIGN(size_read, sizeof(u64)) - size_read;
 
        if (read(session->fd, buf, padding) < 0)
index 34bb556d62191a1300b295011be6210f9633fcf7..0b096c27a419bb46969e75fe8590f65dbde149be 100644 (file)
@@ -47,6 +47,7 @@ enum hist_column {
        HISTC_SYMBOL_TO,
        HISTC_DSO_FROM,
        HISTC_DSO_TO,
+       HISTC_SRCLINE,
        HISTC_NR_COLS, /* Last entry */
 };
 
index 81371bad4ef0e43e1121b5b8c4367fe4d6b212d8..c14c665d9a259c8c5c9aed791a32f2096eff6bb0 100644 (file)
@@ -157,7 +157,7 @@ void machine__exit(struct machine *self);
 void machine__delete(struct machine *self);
 
 int machine__resolve_callchain(struct machine *machine,
-                              struct perf_evsel *evsel, struct thread *thread,
+                              struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent);
 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
index 76b98e2a587d7c8a52a1264d88aad3bdb6839831..a0f61a2a68359d49bb08f6d7fa3b074bab963abd 100644 (file)
@@ -413,19 +413,63 @@ static int test__checkevent_pmu_name(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       /* cpu/config=1,name=krava1/u */
+       /* cpu/config=1,name=krava/u */
        evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",  1 == evsel->attr.config);
-       TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "krava"));
+       TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava"));
 
-       /* cpu/config=2/" */
+       /* cpu/config=2/u */
        evsel = list_entry(evsel->node.next, struct perf_evsel, node);
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",  2 == evsel->attr.config);
-       TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "raw 0x2"));
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "raw 0x2:u"));
+
+       return 0;
+}
+
+static int test__checkterms_simple(struct list_head *terms)
+{
+       struct parse_events__term *term;
+
+       /* config=10 */
+       term = list_entry(terms->next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 10);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* config1 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* config2=3 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 3);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* umask=1 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+       TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
 
        return 0;
 }
@@ -559,7 +603,23 @@ static struct test__event_st test__events_pmu[] = {
 #define TEST__EVENTS_PMU_CNT (sizeof(test__events_pmu) / \
                              sizeof(struct test__event_st))
 
-static int test(struct test__event_st *e)
+struct test__term {
+       const char *str;
+       __u32 type;
+       int (*check)(struct list_head *terms);
+};
+
+static struct test__term test__terms[] = {
+       [0] = {
+               .str   = "config=10,config1,config2=3,umask=1",
+               .check = test__checkterms_simple,
+       },
+};
+
+#define TEST__TERMS_CNT (sizeof(test__terms) / \
+                        sizeof(struct test__term))
+
+static int test_event(struct test__event_st *e)
 {
        struct perf_evlist *evlist;
        int ret;
@@ -590,7 +650,48 @@ static int test_events(struct test__event_st *events, unsigned cnt)
                struct test__event_st *e = &events[i];
 
                pr_debug("running test %d '%s'\n", i, e->name);
-               ret = test(e);
+               ret = test_event(e);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int test_term(struct test__term *t)
+{
+       struct list_head *terms;
+       int ret;
+
+       terms = malloc(sizeof(*terms));
+       if (!terms)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(terms);
+
+       ret = parse_events_terms(terms, t->str);
+       if (ret) {
+               pr_debug("failed to parse terms '%s', err %d\n",
+                        t->str , ret);
+               return ret;
+       }
+
+       ret = t->check(terms);
+       parse_events__free_terms(terms);
+
+       return ret;
+}
+
+static int test_terms(struct test__term *terms, unsigned cnt)
+{
+       int ret = 0;
+       unsigned i;
+
+       for (i = 0; i < cnt; i++) {
+               struct test__term *t = &terms[i];
+
+               pr_debug("running test %d '%s'\n", i, t->str);
+               ret = test_term(t);
                if (ret)
                        break;
        }
@@ -617,9 +718,21 @@ int parse_events__test(void)
 {
        int ret;
 
-       ret = test_events(test__events, TEST__EVENTS_CNT);
-       if (!ret && test_pmu())
-               ret = test_events(test__events_pmu, TEST__EVENTS_PMU_CNT);
+       do {
+               ret = test_events(test__events, TEST__EVENTS_CNT);
+               if (ret)
+                       break;
+
+               if (test_pmu()) {
+                       ret = test_events(test__events_pmu,
+                                         TEST__EVENTS_PMU_CNT);
+                       if (ret)
+                               break;
+               }
+
+               ret = test_terms(test__terms, TEST__TERMS_CNT);
+
+       } while (0);
 
        return ret;
 }
index 05dbc8b3c767217ceb3204c7be46f5aedbb205c4..0cc27da30ddbb6cca55a6acc07662ac2f6c6e82a 100644 (file)
@@ -11,6 +11,8 @@
 #include "cache.h"
 #include "header.h"
 #include "debugfs.h"
+#include "parse-events-bison.h"
+#define YY_EXTRA_TYPE int
 #include "parse-events-flex.h"
 #include "pmu.h"
 
@@ -26,7 +28,7 @@ struct event_symbol {
 #ifdef PARSER_DEBUG
 extern int parse_events_debug;
 #endif
-int parse_events_parse(struct list_head *list, int *idx);
+int parse_events_parse(void *data, void *scanner);
 
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -62,63 +64,6 @@ static struct event_symbol event_symbols[] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
-       "cpu-clock",
-       "task-clock",
-       "page-faults",
-       "context-switches",
-       "CPU-migrations",
-       "minor-faults",
-       "major-faults",
-       "alignment-faults",
-       "emulation-faults",
-};
-
-#define MAX_ALIASES 8
-
-static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
- { "L1-dcache",        "l1-d",         "l1d",          "L1-data",              },
- { "L1-icache",        "l1-i",         "l1i",          "L1-instruction",       },
- { "LLC",      "L2",                                                   },
- { "dTLB",     "d-tlb",        "Data-TLB",                             },
- { "iTLB",     "i-tlb",        "Instruction-TLB",                      },
- { "branch",   "branches",     "bpu",          "btb",          "bpc",  },
- { "node",                                                             },
-};
-
-static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
- { "load",     "loads",        "read",                                 },
- { "store",    "stores",       "write",                                },
- { "prefetch", "prefetches",   "speculative-read", "speculative-load", },
-};
-
-static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
-                                 [MAX_ALIASES] = {
- { "refs",     "Reference",    "ops",          "access",               },
- { "misses",   "miss",                                                 },
-};
-
-#define C(x)           PERF_COUNT_HW_CACHE_##x
-#define CACHE_READ     (1 << C(OP_READ))
-#define CACHE_WRITE    (1 << C(OP_WRITE))
-#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
-#define COP(x)         (1 << x)
-
-/*
- * cache operartion stat
- * L1I : Read and prefetch only
- * ITLB and BPU : Read-only
- */
-static unsigned long hw_cache_stat[C(MAX)] = {
- [C(L1D)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(L1I)]      = (CACHE_READ | CACHE_PREFETCH),
- [C(LL)]       = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(DTLB)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(ITLB)]     = (CACHE_READ),
- [C(BPU)]      = (CACHE_READ),
- [C(NODE)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-};
-
 #define for_each_subsystem(sys_dir, sys_dirent, sys_next)             \
        while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)        \
        if (sys_dirent.d_type == DT_DIR &&                                     \
@@ -218,48 +163,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
        return NULL;
 }
 
-#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
-static const char *tracepoint_id_to_name(u64 config)
-{
-       static char buf[TP_PATH_LEN];
-       struct tracepoint_path *path;
-
-       path = tracepoint_id_to_path(config);
-       if (path) {
-               snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
-               free(path->name);
-               free(path->system);
-               free(path);
-       } else
-               snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
-
-       return buf;
-}
-
-static int is_cache_op_valid(u8 cache_type, u8 cache_op)
-{
-       if (hw_cache_stat[cache_type] & COP(cache_op))
-               return 1;       /* valid */
-       else
-               return 0;       /* invalid */
-}
-
-static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
-{
-       static char name[50];
-
-       if (cache_result) {
-               sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
-                       hw_cache_op[cache_op][0],
-                       hw_cache_result[cache_result][0]);
-       } else {
-               sprintf(name, "%s-%s", hw_cache[cache_type][0],
-                       hw_cache_op[cache_op][1]);
-       }
-
-       return name;
-}
-
 const char *event_type(int type)
 {
        switch (type) {
@@ -282,76 +185,6 @@ const char *event_type(int type)
        return "unknown";
 }
 
-const char *event_name(struct perf_evsel *evsel)
-{
-       u64 config = evsel->attr.config;
-       int type = evsel->attr.type;
-
-       if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
-               /*
-                * XXX minimal fix, see comment on perf_evsen__name, this static buffer
-                * will go away together with event_name in the next devel cycle.
-                */
-               static char bf[128];
-               perf_evsel__name(evsel, bf, sizeof(bf));
-               return bf;
-       }
-
-       if (evsel->name)
-               return evsel->name;
-
-       return __event_name(type, config);
-}
-
-const char *__event_name(int type, u64 config)
-{
-       static char buf[32];
-
-       if (type == PERF_TYPE_RAW) {
-               sprintf(buf, "raw 0x%" PRIx64, config);
-               return buf;
-       }
-
-       switch (type) {
-       case PERF_TYPE_HARDWARE:
-               return __perf_evsel__hw_name(config);
-
-       case PERF_TYPE_HW_CACHE: {
-               u8 cache_type, cache_op, cache_result;
-
-               cache_type   = (config >>  0) & 0xff;
-               if (cache_type > PERF_COUNT_HW_CACHE_MAX)
-                       return "unknown-ext-hardware-cache-type";
-
-               cache_op     = (config >>  8) & 0xff;
-               if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
-                       return "unknown-ext-hardware-cache-op";
-
-               cache_result = (config >> 16) & 0xff;
-               if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
-                       return "unknown-ext-hardware-cache-result";
-
-               if (!is_cache_op_valid(cache_type, cache_op))
-                       return "invalid-cache";
-
-               return event_cache_name(cache_type, cache_op, cache_result);
-       }
-
-       case PERF_TYPE_SOFTWARE:
-               if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
-                       return sw_event_names[config];
-               return "unknown-software";
-
-       case PERF_TYPE_TRACEPOINT:
-               return tracepoint_id_to_name(config);
-
-       default:
-               break;
-       }
-
-       return "unknown";
-}
-
 static int add_event(struct list_head **_list, int *idx,
                     struct perf_event_attr *attr, char *name)
 {
@@ -373,19 +206,20 @@ static int add_event(struct list_head **_list, int *idx,
                return -ENOMEM;
        }
 
-       evsel->name = strdup(name);
+       if (name)
+               evsel->name = strdup(name);
        list_add_tail(&evsel->node, list);
        *_list = list;
        return 0;
 }
 
-static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
 {
        int i, j;
        int n, longest = -1;
 
        for (i = 0; i < size; i++) {
-               for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+               for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
                        n = strlen(names[i][j]);
                        if (n > longest && !strncasecmp(str, names[i][j], n))
                                longest = n;
@@ -410,7 +244,7 @@ int parse_events_add_cache(struct list_head **list, int *idx,
         * No fallback - if we cannot get a clear cache type
         * then bail out:
         */
-       cache_type = parse_aliases(type, hw_cache,
+       cache_type = parse_aliases(type, perf_evsel__hw_cache,
                                   PERF_COUNT_HW_CACHE_MAX);
        if (cache_type == -1)
                return -EINVAL;
@@ -423,18 +257,18 @@ int parse_events_add_cache(struct list_head **list, int *idx,
                snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
 
                if (cache_op == -1) {
-                       cache_op = parse_aliases(str, hw_cache_op,
+                       cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
                                                 PERF_COUNT_HW_CACHE_OP_MAX);
                        if (cache_op >= 0) {
-                               if (!is_cache_op_valid(cache_type, cache_op))
+                               if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
                                        return -EINVAL;
                                continue;
                        }
                }
 
                if (cache_result == -1) {
-                       cache_result = parse_aliases(str, hw_cache_result,
-                                               PERF_COUNT_HW_CACHE_RESULT_MAX);
+                       cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
+                                                    PERF_COUNT_HW_CACHE_RESULT_MAX);
                        if (cache_result >= 0)
                                continue;
                }
@@ -666,8 +500,7 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
            config_attr(&attr, head_config, 1))
                return -EINVAL;
 
-       return add_event(list, idx, &attr,
-                        (char *) __event_name(type, config));
+       return add_event(list, idx, &attr, NULL);
 }
 
 static int parse_events__is_name_term(struct parse_events__term *term)
@@ -675,8 +508,7 @@ static int parse_events__is_name_term(struct parse_events__term *term)
        return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
 }
 
-static char *pmu_event_name(struct perf_event_attr *attr,
-                           struct list_head *head_terms)
+static char *pmu_event_name(struct list_head *head_terms)
 {
        struct parse_events__term *term;
 
@@ -684,7 +516,7 @@ static char *pmu_event_name(struct perf_event_attr *attr,
                if (parse_events__is_name_term(term))
                        return term->val.str;
 
-       return (char *) __event_name(PERF_TYPE_RAW, attr->config);
+       return NULL;
 }
 
 int parse_events_add_pmu(struct list_head **list, int *idx,
@@ -699,6 +531,9 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
 
        memset(&attr, 0, sizeof(attr));
 
+       if (perf_pmu__check_alias(pmu, head_config))
+               return -EINVAL;
+
        /*
         * Configure hardcoded terms first, no need to check
         * return value when called with fail == 0 ;)
@@ -709,7 +544,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
                return -EINVAL;
 
        return add_event(list, idx, &attr,
-                        pmu_event_name(&attr, head_config));
+                        pmu_event_name(head_config));
 }
 
 void parse_events_update_lists(struct list_head *list_event,
@@ -787,27 +622,62 @@ int parse_events_modifier(struct list_head *list, char *str)
        return 0;
 }
 
-int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+static int parse_events__scanner(const char *str, void *data, int start_token)
 {
-       LIST_HEAD(list);
-       LIST_HEAD(list_tmp);
        YY_BUFFER_STATE buffer;
-       int ret, idx = evlist->nr_entries;
+       void *scanner;
+       int ret;
 
-       buffer = parse_events__scan_string(str);
+       ret = parse_events_lex_init_extra(start_token, &scanner);
+       if (ret)
+               return ret;
+
+       buffer = parse_events__scan_string(str, scanner);
 
 #ifdef PARSER_DEBUG
        parse_events_debug = 1;
 #endif
-       ret = parse_events_parse(&list, &idx);
+       ret = parse_events_parse(data, scanner);
 
-       parse_events__flush_buffer(buffer);
-       parse_events__delete_buffer(buffer);
-       parse_events_lex_destroy();
+       parse_events__flush_buffer(buffer, scanner);
+       parse_events__delete_buffer(buffer, scanner);
+       parse_events_lex_destroy(scanner);
+       return ret;
+}
+
+/*
+ * Parse an event config string and return the resulting list of event terms.
+ */
+int parse_events_terms(struct list_head *terms, const char *str)
+{
+       struct parse_events_data__terms data = {
+               .terms = NULL,
+       };
+       int ret;
+
+       ret = parse_events__scanner(str, &data, PE_START_TERMS);
+       if (!ret) {
+               list_splice(data.terms, terms);
+               free(data.terms);
+               return 0;
+       }
+
+       parse_events__free_terms(data.terms);
+       return ret;
+}
+
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+{
+       struct parse_events_data__events data = {
+               .list = LIST_HEAD_INIT(data.list),
+               .idx  = evlist->nr_entries,
+       };
+       int ret;
 
+       ret = parse_events__scanner(str, &data, PE_START_EVENTS);
        if (!ret) {
-               int entries = idx - evlist->nr_entries;
-               perf_evlist__splice_list_tail(evlist, &list, entries);
+               int entries = data.idx - evlist->nr_entries;
+               perf_evlist__splice_list_tail(evlist, &data.list, entries);
                return 0;
        }
 
@@ -970,16 +840,17 @@ void print_events_type(u8 type)
 int print_hwcache_events(const char *event_glob)
 {
        unsigned int type, op, i, printed = 0;
+       char name[64];
 
        for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
                for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
                        /* skip invalid cache type */
-                       if (!is_cache_op_valid(type, op))
+                       if (!perf_evsel__is_cache_op_valid(type, op))
                                continue;
 
                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-                               char *name = event_cache_name(type, op, i);
-
+                               __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+                                                                       name, sizeof(name));
                                if (event_glob != NULL && !strglobmatch(name, event_glob))
                                        continue;
 
@@ -1106,6 +977,13 @@ int parse_events__term_str(struct parse_events__term **term,
                        config, str, 0);
 }
 
+int parse_events__term_clone(struct parse_events__term **new,
+                            struct parse_events__term *term)
+{
+       return new_term(new, term->type_val, term->type_term, term->config,
+                       term->val.str, term->val.num);
+}
+
 void parse_events__free_terms(struct list_head *terms)
 {
        struct parse_events__term *term, *h;
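(A hedged usage sketch of the new parse_events_terms() entry point, echoing test_term() above; dump_terms() is an illustrative name and error handling is trimmed.)

#include <stdio.h>

/* Parse an event config string into a term list and print each term. */
static void dump_terms(const char *str)
{
	struct parse_events__term *term;
	LIST_HEAD(terms);

	if (parse_events_terms(&terms, str))
		return;

	list_for_each_entry(term, &terms, list)
		printf("term type %d, is num: %d\n", term->type_term,
		       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);

	parse_events__free_terms(&terms);
}

/* e.g. dump_terms("config=10,config1,config2=3,umask=1"); */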
index 8cac57ab4ee6b396549a4ee857486effedb9343b..ee9c218a193c8d8c8b20f893da976d262931e172 100644 (file)
@@ -26,13 +26,12 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
 extern bool have_tracepoints(struct list_head *evlist);
 
 const char *event_type(int type);
-const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
 
 extern int parse_events_option(const struct option *opt, const char *str,
                               int unset);
 extern int parse_events(struct perf_evlist *evlist, const char *str,
                        int unset);
+extern int parse_events_terms(struct list_head *terms, const char *str);
 extern int parse_filter(const struct option *opt, const char *str, int unset);
 
 #define EVENTS_HELP_MAX (128*1024)
@@ -63,11 +62,22 @@ struct parse_events__term {
        struct list_head list;
 };
 
+struct parse_events_data__events {
+       struct list_head list;
+       int idx;
+};
+
+struct parse_events_data__terms {
+       struct list_head *terms;
+};
+
 int parse_events__is_hardcoded_term(struct parse_events__term *term);
 int parse_events__term_num(struct parse_events__term **_term,
                           int type_term, char *config, long num);
 int parse_events__term_str(struct parse_events__term **_term,
                           int type_term, char *config, char *str);
+int parse_events__term_clone(struct parse_events__term **new,
+                            struct parse_events__term *term);
 void parse_events__free_terms(struct list_head *terms);
 int parse_events_modifier(struct list_head *list, char *str);
 int parse_events_add_tracepoint(struct list_head **list, int *idx,
@@ -83,8 +93,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
                         char *pmu , struct list_head *head_config);
 void parse_events_update_lists(struct list_head *list_event,
                               struct list_head *list_all);
-void parse_events_error(struct list_head *list_all,
-                       int *idx, char const *msg);
+void parse_events_error(void *data, void *scanner, char const *msg);
 int parse_events__test(void);
 
 void print_events(const char *event_glob);
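The two new structs carry per-invocation parser state that previously traveled as globals and extra %parse-param arguments: event parsing accumulates evsels on 'list' and keeps a running index in 'idx', while term parsing only hands back the resulting term list. A minimal setup sketch (hypothetical caller; parse_events_parse() is the bison-generated entry point implied by %name-prefix):

	struct parse_events_data__events data = {
		.list = LIST_HEAD_INIT(data.list),
		.idx  = evlist->nr_entries,	/* new evsels continue the numbering */
	};

	ret = parse_events_parse(&data, scanner);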
index 618a8e7883995bbee72f4b90c6d5e55a012f05f1..488362e14133bd4006886858f841828898e531a5 100644 (file)
@@ -1,4 +1,6 @@
 
+%option reentrant
+%option bison-bridge
 %option prefix="parse_events_"
 %option stack
 
@@ -8,7 +10,10 @@
 #include "parse-events-bison.h"
 #include "parse-events.h"
 
-static int __value(char *str, int base, int token)
+char *parse_events_get_text(yyscan_t yyscanner);
+YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+
+static int __value(YYSTYPE *yylval, char *str, int base, int token)
 {
        long num;
 
@@ -17,35 +22,48 @@ static int __value(char *str, int base, int token)
        if (errno)
                return PE_ERROR;
 
-       parse_events_lval.num = num;
+       yylval->num = num;
        return token;
 }
 
-static int value(int base)
+static int value(yyscan_t scanner, int base)
 {
-       return __value(parse_events_text, base, PE_VALUE);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       return __value(yylval, text, base, PE_VALUE);
 }
 
-static int raw(void)
+static int raw(yyscan_t scanner)
 {
-       return __value(parse_events_text + 1, 16, PE_RAW);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       return __value(yylval, text + 1, 16, PE_RAW);
 }
 
-static int str(int token)
+static int str(yyscan_t scanner, int token)
 {
-       parse_events_lval.str = strdup(parse_events_text);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       yylval->str = strdup(text);
        return token;
 }
 
-static int sym(int type, int config)
+static int sym(yyscan_t scanner, int type, int config)
 {
-       parse_events_lval.num = (type << 16) + config;
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+       yylval->num = (type << 16) + config;
        return PE_VALUE_SYM;
 }
 
-static int term(int type)
+static int term(yyscan_t scanner, int type)
 {
-       parse_events_lval.num = type;
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+       yylval->num = type;
        return PE_TERM;
 }
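Note: with '%option reentrant' and '%option bison-bridge', flex no longer emits the parse_events_text/parse_events_lval globals; the matched text and the token value slot are fetched through generated per-scanner accessors instead, whose prototypes are declared by hand above, presumably because flex emits its own declarations only after this code block. Each helper reduces to the same shape (a sketch, not additional code):

	/* shape shared by value()/raw()/str()/sym()/term() above */
	static int lex_helper_shape(yyscan_t scanner, int token)
	{
		YYSTYPE *yylval = parse_events_get_lval(scanner);	/* per-scanner value slot */
		char *text = parse_events_get_text(scanner);		/* matched lexeme */

		yylval->str = strdup(text);	/* or yylval->num = ... */
		return token;
	}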
 
@@ -61,25 +79,38 @@ modifier_event      [ukhpGH]{1,8}
 modifier_bp    [rwx]
 
 %%
-cpu-cycles|cycles                              { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
-stalled-cycles-frontend|idle-cycles-frontend   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
-stalled-cycles-backend|idle-cycles-backend     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
-instructions                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
-cache-references                               { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
-cache-misses                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
-branch-instructions|branches                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
-branch-misses                                  { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
-bus-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
-ref-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
-cpu-clock                                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
-task-clock                                     { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
-page-faults|faults                             { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
-minor-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
-major-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
-context-switches|cs                            { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
-cpu-migrations|migrations                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
-alignment-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
-emulation-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+%{
+       {
+               int start_token;
+
+               start_token = (int) parse_events_get_extra(yyscanner);
+               if (start_token) {
+                       parse_events_set_extra(NULL, yyscanner);
+                       return start_token;
+               }
+       }
+%}
+
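Note: an indented %{ %} block at the top of the rules section runs on every yylex() call before any pattern is matched; stashing a one-shot start token in the scanner's 'extra' pointer is the standard trick for giving a single grammar multiple entry points. Once the token has been delivered, the slot is cleared so later calls fall through to normal matching.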
+cpu-cycles|cycles                              { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions                                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references                               { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses                                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses                                  { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles                                     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles                                     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock                                      { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock                                     { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults                             { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults                                   { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults                                   { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs                            { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations                      { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
 
 L1-dcache|l1-d|l1d|L1-data             |
 L1-icache|l1-i|l1i|L1-instruction      |
@@ -87,14 +118,14 @@ LLC|L2                                     |
 dTLB|d-tlb|Data-TLB                    |
 iTLB|i-tlb|Instruction-TLB             |
 branch|branches|bpu|btb|bpc            |
-node                                   { return str(PE_NAME_CACHE_TYPE); }
+node                                   { return str(yyscanner, PE_NAME_CACHE_TYPE); }
 
 load|loads|read                                |
 store|stores|write                     |
 prefetch|prefetches                    |
 speculative-read|speculative-load      |
 refs|Reference|ops|access              |
-misses|miss                            { return str(PE_NAME_CACHE_OP_RESULT); }
+misses|miss                            { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
 
        /*
         * These are event config hardcoded term names to be specified
@@ -102,20 +133,20 @@ misses|miss                               { return str(PE_NAME_CACHE_OP_RESULT); }
         * so we can put them here directly. In case we have a conflict in
         * the future, this needs to go into the '//' condition block.
         */
-config                 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
-config1                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
-config2                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
-name                   { return term(PARSE_EVENTS__TERM_TYPE_NAME); }
-period                 { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
-branch_type            { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+config                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name                   { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type            { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
 
 mem:                   { BEGIN(mem); return PE_PREFIX_MEM; }
-r{num_raw_hex}         { return raw(); }
-{num_dec}              { return value(10); }
-{num_hex}              { return value(16); }
+r{num_raw_hex}         { return raw(yyscanner); }
+{num_dec}              { return value(yyscanner, 10); }
+{num_hex}              { return value(yyscanner, 16); }
 
-{modifier_event}       { return str(PE_MODIFIER_EVENT); }
-{name}                 { return str(PE_NAME); }
+{modifier_event}       { return str(yyscanner, PE_MODIFIER_EVENT); }
+{name}                 { return str(yyscanner, PE_NAME); }
 "/"                    { return '/'; }
 -                      { return '-'; }
 ,                      { return ','; }
@@ -123,17 +154,17 @@ r{num_raw_hex}            { return raw(); }
 =                      { return '='; }
 
 <mem>{
-{modifier_bp}          { return str(PE_MODIFIER_BP); }
+{modifier_bp}          { return str(yyscanner, PE_MODIFIER_BP); }
 :                      { return ':'; }
-{num_dec}              { return value(10); }
-{num_hex}              { return value(16); }
+{num_dec}              { return value(yyscanner, 10); }
+{num_hex}              { return value(yyscanner, 16); }
        /*
         * We need a separate 'mem:' scanner state in order to get the
         * specific modifier bits parsed out. Otherwise we would get PE_NAME
         * and would need to parse it manually. During the escape from the
         * <mem> state we need to put the escaping char back, so we don't
         * miss it.
         */
-.                      { unput(*parse_events_text); BEGIN(INITIAL); }
+.                      { unput(*yytext); BEGIN(INITIAL); }
        /*
         * We destroy the scanner after reaching EOF,
         * but anyway just to be sure get back to INIT state.
@@ -143,7 +174,7 @@ r{num_raw_hex}              { return raw(); }
 
 %%
 
-int parse_events_wrap(void)
+int parse_events_wrap(void *scanner __used)
 {
        return 1;
 }
index 362cc59332ae78e0774fed92e3f20a2f751fa211..9525c455d27f9b095343b0e8ceaa3a63add8c2fd 100644 (file)
@@ -1,7 +1,8 @@
-
+%pure-parser
 %name-prefix "parse_events_"
-%parse-param {struct list_head *list_all}
-%parse-param {int *idx}
+%parse-param {void *_data}
+%parse-param {void *scanner}
+%lex-param {void* scanner}
 
 %{
 
@@ -12,8 +13,9 @@
 #include "types.h"
 #include "util.h"
 #include "parse-events.h"
+#include "parse-events-bison.h"
 
-extern int parse_events_lex (void);
+extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
 
 #define ABORT_ON(val) \
 do { \
@@ -23,6 +25,7 @@ do { \
 
 %}
 
+%token PE_START_EVENTS PE_START_TERMS
 %token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
 %token PE_NAME
 %token PE_MODIFIER_EVENT PE_MODIFIER_BP
@@ -58,24 +61,33 @@ do { \
 }
 %%
 
+start:
+PE_START_EVENTS events
+|
+PE_START_TERMS  terms
+
 events:
 events ',' event | event
 
 event:
 event_def PE_MODIFIER_EVENT
 {
+       struct parse_events_data__events *data = _data;
+
        /*
         * Apply the modifier to all events added by a single event
         * definition (there could be more events added for multiple
         * tracepoint definitions via '*?').
         */
        ABORT_ON(parse_events_modifier($1, $2));
-       parse_events_update_lists($1, list_all);
+       parse_events_update_lists($1, &data->list);
 }
 |
 event_def
 {
-       parse_events_update_lists($1, list_all);
+       struct parse_events_data__events *data = _data;
+
+       parse_events_update_lists($1, &data->list);
 }
 
 event_def: event_pmu |
@@ -89,9 +101,10 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME '/' event_config '/'
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_pmu(&list, idx, $1, $3));
+       ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
@@ -99,94 +112,115 @@ PE_NAME '/' event_config '/'
 event_legacy_symbol:
 PE_VALUE_SYM '/' event_config '/'
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, type, config, $3));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         type, config, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
 |
 PE_VALUE_SYM sep_slash_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, type, config, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         type, config, NULL));
        $$ = list;
 }
 
 event_legacy_cache:
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, $5));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
        $$ = list;
 }
 |
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, NULL));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
        $$ = list;
 }
 |
 PE_NAME_CACHE_TYPE
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, NULL, NULL));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
        $$ = list;
 }
 
 event_legacy_mem:
 PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, $4));
+       ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+                                            (void *) $2, $4));
        $$ = list;
 }
 |
 PE_PREFIX_MEM PE_VALUE sep_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, NULL));
+       ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+                                            (void *) $2, NULL));
        $$ = list;
 }
 
 event_legacy_tracepoint:
 PE_NAME ':' PE_NAME
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_tracepoint(&list, idx, $1, $3));
+       ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
        $$ = list;
 }
 
 event_legacy_numeric:
 PE_VALUE ':' PE_VALUE
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, $1, $3, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL));
        $$ = list;
 }
 
 event_legacy_raw:
 PE_RAW
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, PERF_TYPE_RAW, $1, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         PERF_TYPE_RAW, $1, NULL));
        $$ = list;
 }
 
+terms: event_config
+{
+       struct parse_events_data__terms *data = _data;
+       data->terms = $1;
+}
+
 event_config:
 event_config ',' event_term
 {
@@ -267,8 +301,7 @@ sep_slash_dc: '/' | ':' |
 
 %%
 
-void parse_events_error(struct list_head *list_all __used,
-                       int *idx __used,
+void parse_events_error(void *data __used, void *scanner __used,
                        char const *msg __used)
 {
 }
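Bison has no native support for multiple start symbols, so the grammar routes on whichever PE_START_* token the scanner delivers first. parse_events_error() is empty on purpose: with a pure parser the failure simply propagates out as a non-zero return from parse_events_parse(), which the callers report. A sketch of the whole drive sequence (hypothetical wrapper; the names are the flex/bison-generated ones implied by %option prefix and %name-prefix, so treat the exact signatures as assumptions):

	void *scanner;
	YY_BUFFER_STATE buffer;
	int ret;

	/* smuggle the start token in through the scanner's 'extra' slot */
	ret = parse_events_lex_init_extra((void *)PE_START_TERMS, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);
	ret = parse_events_parse(&data, scanner);	/* 0 on success */

	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;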
index a119a5371699bd1c89575ce02de0d47655548e52..74d0948ec3680908baeae018d74bd2eae0468a86 100644 (file)
@@ -80,6 +80,114 @@ static int pmu_format(char *name, struct list_head *format)
        return 0;
 }
 
+static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
+{
+       struct perf_pmu__alias *alias;
+       char buf[256];
+       int ret;
+
+       ret = fread(buf, 1, sizeof(buf) - 1, file);
+       if (ret == 0)
+               return -EINVAL;
+       buf[ret] = 0;
+
+       alias = malloc(sizeof(*alias));
+       if (!alias)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&alias->terms);
+       ret = parse_events_terms(&alias->terms, buf);
+       if (ret) {
+               free(alias);
+               return ret;
+       }
+
+       alias->name = strdup(name);
+       list_add_tail(&alias->list, list);
+       return 0;
+}
+
+/*
+ * Process all the sysfs attributes located under the directory
+ * specified by the 'dir' parameter.
+ */
+static int pmu_aliases_parse(char *dir, struct list_head *head)
+{
+       struct dirent *evt_ent;
+       DIR *event_dir;
+       int ret = 0;
+
+       event_dir = opendir(dir);
+       if (!event_dir)
+               return -EINVAL;
+
+       while (!ret && (evt_ent = readdir(event_dir))) {
+               char path[PATH_MAX];
+               char *name = evt_ent->d_name;
+               FILE *file;
+
+               if (!strcmp(name, ".") || !strcmp(name, ".."))
+                       continue;
+
+               snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+               ret = -EINVAL;
+               file = fopen(path, "r");
+               if (!file)
+                       break;
+               ret = perf_pmu__new_alias(head, name, file);
+               fclose(file);
+       }
+
+       closedir(event_dir);
+       return ret;
+}
+
+/*
+ * Read the PMU event alias definitions, which should be located as
+ * sysfs group attributes under /sys/bus/event_source/devices/<dev>/events.
+ */
+static int pmu_aliases(char *name, struct list_head *head)
+{
+       struct stat st;
+       char path[PATH_MAX];
+       const char *sysfs;
+
+       sysfs = sysfs_find_mountpoint();
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, PATH_MAX,
+                "%s/bus/event_source/devices/%s/events", sysfs, name);
+
+       if (stat(path, &st) < 0)
+               return -1;
+
+       if (pmu_aliases_parse(path, head))
+               return -1;
+
+       return 0;
+}
+
+static int pmu_alias_terms(struct perf_pmu__alias *alias,
+                          struct list_head *terms)
+{
+       struct parse_events__term *term, *clone;
+       LIST_HEAD(list);
+       int ret;
+
+       list_for_each_entry(term, &alias->terms, list) {
+               ret = parse_events__term_clone(&clone, term);
+               if (ret) {
+                       parse_events__free_terms(&list);
+                       return ret;
+               }
+               list_add_tail(&clone->list, &list);
+       }
+       list_splice(&list, terms);
+       return 0;
+}
+
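Each alias is a file under the PMU's events/ directory whose body is an ordinary term list, which is why perf_pmu__new_alias() can feed the file contents straight into parse_events_terms(). And since one alias may be used several times, pmu_alias_terms() splices clones into the caller's list rather than the alias's own term objects. A sketch with a made-up file body ('event=0xc5' is purely illustrative):

	LIST_HEAD(terms);

	/* what the alias loader effectively does with a file's contents */
	if (parse_events_terms(&terms, "event=0xc5"))
		return -EINVAL;
	/* 'terms' now holds the parsed term(s), ready to be cloned per use */
	parse_events__free_terms(&terms);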
 /*
  * Reading/parsing the default pmu type value, which should be
  * located at:
@@ -118,6 +226,7 @@ static struct perf_pmu *pmu_lookup(char *name)
 {
        struct perf_pmu *pmu;
        LIST_HEAD(format);
+       LIST_HEAD(aliases);
        __u32 type;
 
        /*
@@ -135,8 +244,12 @@ static struct perf_pmu *pmu_lookup(char *name)
        if (!pmu)
                return NULL;
 
+       pmu_aliases(name, &aliases);
+
        INIT_LIST_HEAD(&pmu->format);
+       INIT_LIST_HEAD(&pmu->aliases);
        list_splice(&format, &pmu->format);
+       list_splice(&aliases, &pmu->aliases);
        pmu->name = strdup(name);
        pmu->type = type;
        return pmu;
@@ -279,6 +392,59 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
        return pmu_config(&pmu->format, attr, head_terms);
 }
 
+static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
+                                             struct parse_events__term *term)
+{
+       struct perf_pmu__alias *alias;
+       char *name;
+
+       if (parse_events__is_hardcoded_term(term))
+               return NULL;
+
+       if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+               if (term->val.num != 1)
+                       return NULL;
+               if (pmu_find_format(&pmu->format, term->config))
+                       return NULL;
+               name = term->config;
+       } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+               if (strcasecmp(term->config, "event"))
+                       return NULL;
+               name = term->val.str;
+       } else {
+               return NULL;
+       }
+
+       list_for_each_entry(alias, &pmu->aliases, list) {
+               if (!strcasecmp(alias->name, name))
+                       return alias;
+       }
+       return NULL;
+}
+
+/*
+ * Find an alias in the terms list and replace it with the terms
+ * defined for that alias.
+ */
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
+{
+       struct parse_events__term *term, *h;
+       struct perf_pmu__alias *alias;
+       int ret;
+
+       list_for_each_entry_safe(term, h, head_terms, list) {
+               alias = pmu_find_alias(pmu, term);
+               if (!alias)
+                       continue;
+               ret = pmu_alias_terms(alias, &term->list);
+               if (ret)
+                       return ret;
+               list_del(&term->list);
+               free(term);
+       }
+       return 0;
+}
+
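The intended call order in the event parser is: look up the PMU, rewrite any alias terms in place, then encode the remaining terms into the attr via the format definitions. A minimal sketch (hypothetical glue; the real caller is parse_events_add_pmu() in parse-events.c):

	struct perf_event_attr attr;
	struct perf_pmu *pmu = perf_pmu__find(name);

	if (!pmu)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.type = pmu->type;

	if (perf_pmu__check_alias(pmu, head_config))
		return -EINVAL;
	if (perf_pmu__config(pmu, &attr, head_config))
		return -EINVAL;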
 int perf_pmu__new_format(struct list_head *list, char *name,
                         int config, unsigned long *bits)
 {
index 68c0db965e1f52574375859e821daacce9387a3a..535f2c5258ab05a8110727704c98791c850dc588 100644 (file)
@@ -19,17 +19,26 @@ struct perf_pmu__format {
        struct list_head list;
 };
 
+struct perf_pmu__alias {
+       char *name;
+       struct list_head terms;
+       struct list_head list;
+};
+
 struct perf_pmu {
        char *name;
        __u32 type;
        struct list_head format;
+       struct list_head aliases;
        struct list_head list;
 };
 
 struct perf_pmu *perf_pmu__find(char *name);
 int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
                     struct list_head *head_terms);
-
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
+struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
+                               struct list_head *head_terms);
 int perf_pmu_wrap(void);
 void perf_pmu_error(struct list_head *list, char *name, char const *msg);
 
index 4c1b3d72a1d2d24093733018674024e42723a548..b3620fe127630eba0e829ad31fcef9965fe40666 100644 (file)
@@ -233,7 +233,8 @@ static void define_event_symbols(struct event_format *event,
                define_event_symbols(event, ev_name, args->next);
 }
 
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
 {
        static char ev_name[256];
        struct event_format *event;
@@ -241,7 +242,7 @@ static inline struct event_format *find_cache_event(int type)
        if (events[type])
                return events[type];
 
-       events[type] = event = trace_find_event(type);
+       events[type] = event = pevent_find_event(pevent, type);
        if (!event)
                return NULL;
 
@@ -252,7 +253,8 @@ static inline struct event_format *find_cache_event(int type)
        return event;
 }
 
-static void perl_process_tracepoint(union perf_event *pevent __unused,
+static void perl_process_tracepoint(union perf_event *perf_event __unused,
+                                   struct pevent *pevent,
                                    struct perf_sample *sample,
                                    struct perf_evsel *evsel,
                                    struct machine *machine __unused,
@@ -275,13 +277,13 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
        if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                return;
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = find_cache_event(type);
+       event = find_cache_event(pevent, type);
        if (!event)
                die("ug! no event found for type %d", type);
 
-       pid = trace_parse_common_pid(data);
+       pid = trace_parse_common_pid(pevent, data);
 
        sprintf(handler, "%s::%s", event->system, event->name);
 
@@ -314,7 +316,8 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
                                offset = field->offset;
                        XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
                } else { /* FIELD_IS_NUMERIC */
-                       val = read_size(data + field->offset, field->size);
+                       val = read_size(pevent, data + field->offset,
+                                       field->size);
                        if (field->flags & FIELD_IS_SIGNED) {
                                XPUSHs(sv_2mortal(newSViv(val)));
                        } else {
@@ -368,14 +371,15 @@ static void perl_process_event_generic(union perf_event *pevent __unused,
        LEAVE;
 }
 
-static void perl_process_event(union perf_event *pevent,
+static void perl_process_event(union perf_event *event,
+                              struct pevent *pevent,
                               struct perf_sample *sample,
                               struct perf_evsel *evsel,
                               struct machine *machine,
                               struct thread *thread)
 {
-       perl_process_tracepoint(pevent, sample, evsel, machine, thread);
-       perl_process_event_generic(pevent, sample, evsel, machine, thread);
+       perl_process_tracepoint(event, pevent, sample, evsel, machine, thread);
+       perl_process_event_generic(event, sample, evsel, machine, thread);
 }
 
 static void run_start_sub(void)
@@ -448,7 +452,7 @@ static int perl_stop_script(void)
        return 0;
 }
 
-static int perl_generate_script(const char *outfile)
+static int perl_generate_script(struct pevent *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
@@ -495,7 +499,7 @@ static int perl_generate_script(const char *outfile)
        fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
        fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
 
-       while ((event = trace_find_next_event(event))) {
+       while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
                fprintf(ofp, "\tmy (");
 
index acb9795286c4bd077531fcf65f14cc8d0699a198..a8ca2f8179a951beb7b5fec1f0422e51063bd5fd 100644 (file)
@@ -190,7 +190,8 @@ static void define_event_symbols(struct event_format *event,
                define_event_symbols(event, ev_name, args->next);
 }
 
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
 {
        static char ev_name[256];
        struct event_format *event;
@@ -198,7 +199,7 @@ static inline struct event_format *find_cache_event(int type)
        if (events[type])
                return events[type];
 
-       events[type] = event = trace_find_event(type);
+       events[type] = event = pevent_find_event(pevent, type);
        if (!event)
                return NULL;
 
@@ -209,7 +210,8 @@ static inline struct event_format *find_cache_event(int type)
        return event;
 }
 
-static void python_process_event(union perf_event *pevent __unused,
+static void python_process_event(union perf_event *perf_event __unused,
+                                struct pevent *pevent,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel __unused,
                                 struct machine *machine __unused,
@@ -233,13 +235,13 @@ static void python_process_event(union perf_event *pevent __unused,
        if (!t)
                Py_FatalError("couldn't create Python tuple");
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = find_cache_event(type);
+       event = find_cache_event(pevent, type);
        if (!event)
                die("ug! no event found for type %d", type);
 
-       pid = trace_parse_common_pid(data);
+       pid = trace_parse_common_pid(pevent, data);
 
        sprintf(handler_name, "%s__%s", event->system, event->name);
 
@@ -284,7 +286,8 @@ static void python_process_event(union perf_event *pevent __unused,
                                offset = field->offset;
                        obj = PyString_FromString((char *)data + offset);
                } else { /* FIELD_IS_NUMERIC */
-                       val = read_size(data + field->offset, field->size);
+                       val = read_size(pevent, data + field->offset,
+                                       field->size);
                        if (field->flags & FIELD_IS_SIGNED) {
                                if ((long long)val >= LONG_MIN &&
                                    (long long)val <= LONG_MAX)
@@ -438,7 +441,7 @@ out:
        return err;
 }
 
-static int python_generate_script(const char *outfile)
+static int python_generate_script(struct pevent *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
@@ -487,7 +490,7 @@ static int python_generate_script(const char *outfile)
        fprintf(ofp, "def trace_end():\n");
        fprintf(ofp, "\tprint \"in trace_end\"\n\n");
 
-       while ((event = trace_find_next_event(event))) {
+       while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "def %s__%s(", event->system, event->name);
                fprintf(ofp, "event_name, ");
                fprintf(ofp, "context, ");
index c3e399bcf18deea25f4db0aafa69278b5b4cb570..f5baff1495e6e8916b996a52f999a4f09c8ee386 100644 (file)
@@ -14,6 +14,7 @@
 #include "sort.h"
 #include "util.h"
 #include "cpumap.h"
+#include "event-parse.h"
 
 static int perf_session__open(struct perf_session *self, bool force)
 {
@@ -289,7 +290,6 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
 }
 
 int machine__resolve_callchain(struct machine *self,
-                              struct perf_evsel *evsel __used,
                               struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent)
@@ -1449,7 +1449,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
        ret += hists__fprintf_nr_events(&session->hists, fp);
 
        list_for_each_entry(pos, &session->evlist->entries, node) {
-               ret += fprintf(fp, "%s stats:\n", event_name(pos));
+               ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += hists__fprintf_nr_events(&pos->hists, fp);
        }
 
@@ -1490,8 +1490,8 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 }
 
 void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
-                         struct machine *machine, struct perf_evsel *evsel,
-                         int print_sym, int print_dso, int print_symoffset)
+                         struct machine *machine, int print_sym,
+                         int print_dso, int print_symoffset)
 {
        struct addr_location al;
        struct callchain_cursor_node *node;
@@ -1505,7 +1505,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
 
        if (symbol_conf.use_callchain && sample->callchain) {
 
-               if (machine__resolve_callchain(machine, evsel, al.thread,
+               if (machine__resolve_callchain(machine, al.thread,
                                                sample->callchain, NULL) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
@@ -1611,3 +1611,58 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
 }
+
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+                                            const struct perf_evsel_str_handler *assocs,
+                                            size_t nr_assocs)
+{
+       struct perf_evlist *evlist = session->evlist;
+       struct event_format *format;
+       struct perf_evsel *evsel;
+       char *tracepoint, *name;
+       size_t i;
+       int err;
+
+       for (i = 0; i < nr_assocs; i++) {
+               err = -ENOMEM;
+               tracepoint = strdup(assocs[i].name);
+               if (tracepoint == NULL)
+                       goto out;
+
+               err = -ENOENT;
+               name = strchr(tracepoint, ':');
+               if (name == NULL)
+                       goto out_free;
+
+               *name++ = '\0';
+               format = pevent_find_event_by_name(session->pevent,
+                                                  tracepoint, name);
+               if (format == NULL) {
+                       /*
+                        * Adding a handler for an event not in the session;
+                        * just ignore it.
+                        */
+                       goto next;
+               }
+
+               evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
+               if (evsel == NULL)
+                       goto next;
+
+               err = -EEXIST;
+               if (evsel->handler.func != NULL)
+                       goto out_free;
+               evsel->handler.func = assocs[i].handler;
+next:
+               free(tracepoint);
+       }
+
+       err = 0;
+out:
+       return err;
+
+out_free:
+       free(tracepoint);
+       goto out;
+}
index 0c702e3f0a364272a9d979b04786403c4e079bbe..7c435bde6eb0ea1f913e1dadfca3da0c23432302 100644 (file)
@@ -33,6 +33,7 @@ struct perf_session {
        struct machine          host_machine;
        struct rb_root          machines;
        struct perf_evlist      *evlist;
+       struct pevent           *pevent;
        /*
         * FIXME: Need to split this up further, we need global
         *        stats + per event stats. 'perf diff' also needs
@@ -151,11 +152,20 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                            unsigned int type);
 
 void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
-                         struct machine *machine, struct perf_evsel *evsel,
-                         int print_sym, int print_dso, int print_symoffset);
+                         struct machine *machine, int print_sym,
+                         int print_dso, int print_symoffset);
 
 int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap);
 
 void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+struct perf_evsel_str_handler;
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+                                            const struct perf_evsel_str_handler *assocs,
+                                            size_t nr_assocs);
+
+#define perf_session__set_tracepoints_handlers(session, array) \
+       __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
 #endif /* __PERF_SESSION_H */
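The helper pairs 'system:name' strings with handlers and wires each into the matching evsel's handler.func, silently skipping tracepoints that are not in the session and refusing to overwrite an already-set handler. A usage sketch (process_sched_switch is hypothetical; perf_evsel_str_handler is assumed to be the { name, handler } pair declared elsewhere in the tree):

	static const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch", process_sched_switch },
	};

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;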
index a27237430c5f1a8b9fd24458609cc405eff9ac73..0f5a0a496bc491e545d3bf30e8820163fbba61c6 100644 (file)
@@ -241,6 +241,54 @@ struct sort_entry sort_sym = {
        .se_width_idx   = HISTC_SYMBOL,
 };
 
+/* --sort srcline */
+
+static int64_t
+sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return (int64_t)(right->ip - left->ip);
+}
+
+static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
+                                  size_t size, unsigned int width __used)
+{
+       FILE *fp;
+       char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
+       size_t line_len;
+       size_t line_len = 0;
+       if (path != NULL)
+               goto out_path;
+
+       snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64,
+                self->ms.map->dso->long_name, self->ip);
+       fp = popen(cmd, "r");
+       if (!fp)
+               goto out_ip;
+
+       if (getline(&path, &line_len, fp) < 0 || !line_len) {
+               pclose(fp);
+               goto out_ip;
+       }
+       pclose(fp);
+       self->srcline = strdup(path);
+       if (self->srcline == NULL)
+               goto out_ip;
+
+       nl = strchr(self->srcline, '\n');
+       if (nl != NULL)
+               *nl = '\0';
+       path = self->srcline;
+out_path:
+       return repsep_snprintf(bf, size, "%s", path);
+out_ip:
+       return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
+}
+
+struct sort_entry sort_srcline = {
+       .se_header      = "Source:Line",
+       .se_cmp         = sort__srcline_cmp,
+       .se_snprintf    = hist_entry__srcline_snprintf,
+       .se_width_idx   = HISTC_SRCLINE,
+};
+
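Note: hist_entry__srcline_snprintf() shells out to addr2line once per hist entry and caches the result in the new hist_entry->srcline field; entries it cannot resolve fall back to printing the raw ip. With the dimension registered in sort_dimensions below, the key should be selectable as e.g. 'perf report --sort srcline'.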
 /* --sort parent */
 
 static int64_t
@@ -439,6 +487,7 @@ static struct sort_dimension sort_dimensions[] = {
        DIM(SORT_PARENT, "parent", sort_parent),
        DIM(SORT_CPU, "cpu", sort_cpu),
        DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+       DIM(SORT_SRCLINE, "srcline", sort_srcline),
 };
 
 int sort_dimension__add(const char *tok)
index 472aa5a63a58f571fccd884f1e7f16f08b3b5ec7..e724b26acd5135e609aca16212edaf7ec56a9da4 100644 (file)
@@ -71,6 +71,7 @@ struct hist_entry {
        char                    level;
        bool                    used;
        u8                      filtered;
+       char                    *srcline;
        struct symbol           *parent;
        union {
                unsigned long     position;
@@ -93,6 +94,7 @@ enum sort_type {
        SORT_SYM_FROM,
        SORT_SYM_TO,
        SORT_MISPREDICT,
+       SORT_SRCLINE,
 };
 
 /*
index d5836382ff2cad7d00fd156d1296f99f8433f698..199bc4d8905d4ff75d31a60471673fac68470aec 100644 (file)
@@ -313,3 +313,25 @@ int strtailcmp(const char *s1, const char *s2)
        return 0;
 }
 
+/**
+ * rtrim - Removes trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns @s.
+ */
+char *rtrim(char *s)
+{
+       size_t size = strlen(s);
+       char *end;
+
+       if (!size)
+               return s;
+
+       end = s + size - 1;
+       while (end >= s && isspace(*end))
+               end--;
+       *(end + 1) = '\0';
+
+       return s;
+}
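A quick sketch of the in-place trim:

	char buf[] = "branch-misses  \n";

	/* rtrim() truncates at the last non-space and returns its argument */
	printf("'%s'\n", rtrim(buf));	/* prints 'branch-misses' */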
index 3e2e5ea0f03f692c41f62e8aeb1f5d3382c86ce1..994f4ffdcd055f026149aef6186eb106fa2952da 100644 (file)
@@ -1590,11 +1590,62 @@ out:
        return err;
 }
 
+static int filename__read_debuglink(const char *filename,
+                                   char *debuglink, size_t size)
+{
+       int fd, err = -1;
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+       Elf_Data *data;
+       Elf_Scn *sec;
+       Elf_Kind ek;
+
+       fd = open(filename, O_RDONLY);
+       if (fd < 0)
+               goto out;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL) {
+               pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+               goto out_close;
+       }
+
+       ek = elf_kind(elf);
+       if (ek != ELF_K_ELF)
+               goto out_close;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL) {
+               pr_err("%s: cannot get elf header.\n", __func__);
+               goto out_close;
+       }
+
+       sec = elf_section_by_name(elf, &ehdr, &shdr,
+                                 ".gnu_debuglink", NULL);
+       if (sec == NULL)
+               goto out_close;
+
+       data = elf_getdata(sec, NULL);
+       if (data == NULL)
+               goto out_close;
+
+       /* the start of this section is a zero-terminated string */
+       strncpy(debuglink, data->d_buf, size);
+       err = 0;
+
+       elf_end(elf);
+
+out_close:
+       close(fd);
+out:
+       return err;
+}
+
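Note: the .gnu_debuglink payload begins with the NUL-terminated basename of the separate debug file (padding and a CRC32 follow, which this reader ignores). dso__load() below substitutes that name for the basename of the DSO path, so a binary /usr/bin/foo carrying a debuglink of 'foo.debug' is probed as /usr/bin/foo.debug before the other symtab locations are tried.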
 char dso__symtab_origin(const struct dso *dso)
 {
        static const char origin[] = {
                [SYMTAB__KALLSYMS]            = 'k',
                [SYMTAB__JAVA_JIT]            = 'j',
+               [SYMTAB__DEBUGLINK]           = 'l',
                [SYMTAB__BUILD_ID_CACHE]      = 'B',
                [SYMTAB__FEDORA_DEBUGINFO]    = 'f',
                [SYMTAB__UBUNTU_DEBUGINFO]    = 'u',
@@ -1662,10 +1713,22 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
         */
        want_symtab = 1;
 restart:
-       for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE;
+       for (dso->symtab_type = SYMTAB__DEBUGLINK;
             dso->symtab_type != SYMTAB__NOT_FOUND;
             dso->symtab_type++) {
                switch (dso->symtab_type) {
+               case SYMTAB__DEBUGLINK: {
+                       char *debuglink;
+                       strncpy(name, dso->long_name, size);
+                       debuglink = name + dso->long_name_len;
+                       while (debuglink != name && *debuglink != '/')
+                               debuglink--;
+                       if (*debuglink == '/')
+                               debuglink++;
+                       filename__read_debuglink(dso->long_name, debuglink,
+                                                size - (debuglink - name));
+                       }
+                       break;
                case SYMTAB__BUILD_ID_CACHE:
                        /* skip the locally configured cache if a symfs is given */
                        if (symbol_conf.symfs[0] ||
index af0752b1aca1a9097b1eb552da416683bf3394c7..a884b99017f085c0dceb47e7b766dab8527dc7df 100644 (file)
@@ -257,6 +257,7 @@ enum symtab_type {
        SYMTAB__KALLSYMS = 0,
        SYMTAB__GUEST_KALLSYMS,
        SYMTAB__JAVA_JIT,
+       SYMTAB__DEBUGLINK,
        SYMTAB__BUILD_ID_CACHE,
        SYMTAB__FEDORA_DEBUGINFO,
        SYMTAB__UBUNTU_DEBUGINFO,
index abe0e8e9506820ac4d5e5b9f2b08c89ffde49b73..7eeebcee291c4c70a8e0bc0dc0371d107080a1d9 100644 (file)
@@ -65,7 +65,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
                                top->freq ? "Hz" : "");
        }
 
-       ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel));
+       ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
 
        ret += SNPRINTF(bf + ret, size - ret, "], ");
 
index df2fddbf0cd2f46d376d691786295526fa1deab0..a51bd86f4d09f16041ea2b570b6e31983f5e23b2 100644 (file)
@@ -32,29 +32,25 @@ int header_page_size_size;
 int header_page_ts_size;
 int header_page_data_offset;
 
-struct pevent *perf_pevent;
-static struct pevent *pevent;
-
 bool latency_format;
 
-int read_trace_init(int file_bigendian, int host_bigendian)
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
 {
-       if (pevent)
-               return 0;
-
-       perf_pevent = pevent_alloc();
-       pevent = perf_pevent;
+       struct pevent *pevent = pevent_alloc();
 
-       pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
-       pevent_set_file_bigendian(pevent, file_bigendian);
-       pevent_set_host_bigendian(pevent, host_bigendian);
+       if (pevent != NULL) {
+               pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+               pevent_set_file_bigendian(pevent, file_bigendian);
+               pevent_set_host_bigendian(pevent, host_bigendian);
+       }
 
-       return 0;
+       return pevent;
 }
 
 static int get_common_field(struct scripting_context *context,
                            int *offset, int *size, const char *type)
 {
+       struct pevent *pevent = context->pevent;
        struct event_format *event;
        struct format_field *field;
 
@@ -150,7 +146,7 @@ void *raw_field_ptr(struct event_format *event, const char *name, void *data)
        return data + field->offset;
 }
 
-int trace_parse_common_type(void *data)
+int trace_parse_common_type(struct pevent *pevent, void *data)
 {
        struct pevent_record record;
 
@@ -158,7 +154,7 @@ int trace_parse_common_type(void *data)
        return pevent_data_type(pevent, &record);
 }
 
-int trace_parse_common_pid(void *data)
+int trace_parse_common_pid(struct pevent *pevent, void *data)
 {
        struct pevent_record record;
 
@@ -166,27 +162,21 @@ int trace_parse_common_pid(void *data)
        return pevent_data_pid(pevent, &record);
 }
 
-unsigned long long read_size(void *ptr, int size)
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size)
 {
        return pevent_read_number(pevent, ptr, size);
 }
 
-struct event_format *trace_find_event(int type)
-{
-       return pevent_find_event(pevent, type);
-}
-
-
-void print_trace_event(int cpu, void *data, int size)
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
 {
        struct event_format *event;
        struct pevent_record record;
        struct trace_seq s;
        int type;
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = trace_find_event(type);
+       event = pevent_find_event(pevent, type);
        if (!event) {
                warning("ug! no event found for type %d", type);
                return;
@@ -203,8 +193,8 @@ void print_trace_event(int cpu, void *data, int size)
        printf("\n");
 }
 
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
-                 char *comm)
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+                unsigned long long nsecs, char *comm)
 {
        struct pevent_record record;
        struct trace_seq s;
@@ -227,7 +217,8 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
        printf("\n");
 }
 
-void parse_proc_kallsyms(char *file, unsigned int size __unused)
+void parse_proc_kallsyms(struct pevent *pevent,
+                        char *file, unsigned int size __unused)
 {
        unsigned long long addr;
        char *func;
@@ -258,7 +249,8 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
        }
 }
 
-void parse_ftrace_printk(char *file, unsigned int size __unused)
+void parse_ftrace_printk(struct pevent *pevent,
+                        char *file, unsigned int size __unused)
 {
        unsigned long long addr;
        char *printk;
@@ -282,17 +274,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused)
        }
 }
 
-int parse_ftrace_file(char *buf, unsigned long size)
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
 {
        return pevent_parse_event(pevent, buf, size, "ftrace");
 }
 
-int parse_event_file(char *buf, unsigned long size, char *sys)
+int parse_event_file(struct pevent *pevent,
+                    char *buf, unsigned long size, char *sys)
 {
        return pevent_parse_event(pevent, buf, size, sys);
 }
 
-struct event_format *trace_find_next_event(struct event_format *event)
+struct event_format *trace_find_next_event(struct pevent *pevent,
+                                          struct event_format *event)
 {
        static int idx;
 
index f097e0dd6c5cb4986aa1e5cd1a1cb61cdca55cdf..719ed74a85652541d19e1f7dc15d26e2348a31fc 100644 (file)
@@ -114,20 +114,20 @@ static void skip(int size)
        };
 }
 
-static unsigned int read4(void)
+static unsigned int read4(struct pevent *pevent)
 {
        unsigned int data;
 
        read_or_die(&data, 4);
-       return __data2host4(perf_pevent, data);
+       return __data2host4(pevent, data);
 }
 
-static unsigned long long read8(void)
+static unsigned long long read8(struct pevent *pevent)
 {
        unsigned long long data;
 
        read_or_die(&data, 8);
-       return __data2host8(perf_pevent, data);
+       return __data2host8(pevent, data);
 }
 
 static char *read_string(void)
@@ -168,12 +168,12 @@ static char *read_string(void)
        return str;
 }
 
-static void read_proc_kallsyms(void)
+static void read_proc_kallsyms(struct pevent *pevent)
 {
        unsigned int size;
        char *buf;
 
-       size = read4();
+       size = read4(pevent);
        if (!size)
                return;
 
@@ -181,29 +181,29 @@ static void read_proc_kallsyms(void)
        read_or_die(buf, size);
        buf[size] = '\0';
 
-       parse_proc_kallsyms(buf, size);
+       parse_proc_kallsyms(pevent, buf, size);
 
        free(buf);
 }
 
-static void read_ftrace_printk(void)
+static void read_ftrace_printk(struct pevent *pevent)
 {
        unsigned int size;
        char *buf;
 
-       size = read4();
+       size = read4(pevent);
        if (!size)
                return;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
 
-       parse_ftrace_printk(buf, size);
+       parse_ftrace_printk(pevent, buf, size);
 
        free(buf);
 }
 
-static void read_header_files(void)
+static void read_header_files(struct pevent *pevent)
 {
        unsigned long long size;
        char *header_event;
@@ -214,7 +214,7 @@ static void read_header_files(void)
        if (memcmp(buf, "header_page", 12) != 0)
                die("did not read header page");
 
-       size = read8();
+       size = read8(pevent);
        skip(size);
 
        /*
@@ -227,47 +227,48 @@ static void read_header_files(void)
        if (memcmp(buf, "header_event", 13) != 0)
                die("did not read header event");
 
-       size = read8();
+       size = read8(pevent);
        header_event = malloc_or_die(size);
        read_or_die(header_event, size);
        free(header_event);
 }
 
-static void read_ftrace_file(unsigned long long size)
+static void read_ftrace_file(struct pevent *pevent, unsigned long long size)
 {
        char *buf;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
-       parse_ftrace_file(buf, size);
+       parse_ftrace_file(pevent, buf, size);
        free(buf);
 }
 
-static void read_event_file(char *sys, unsigned long long size)
+static void read_event_file(struct pevent *pevent, char *sys,
+                           unsigned long long size)
 {
        char *buf;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
-       parse_event_file(buf, size, sys);
+       parse_event_file(pevent, buf, size, sys);
        free(buf);
 }
 
-static void read_ftrace_files(void)
+static void read_ftrace_files(struct pevent *pevent)
 {
        unsigned long long size;
        int count;
        int i;
 
-       count = read4();
+       count = read4(pevent);
 
        for (i = 0; i < count; i++) {
-               size = read8();
-               read_ftrace_file(size);
+               size = read8(pevent);
+               read_ftrace_file(pevent, size);
        }
 }
 
-static void read_event_files(void)
+static void read_event_files(struct pevent *pevent)
 {
        unsigned long long size;
        char *sys;
@@ -275,15 +276,15 @@ static void read_event_files(void)
        int count;
        int i,x;
 
-       systems = read4();
+       systems = read4(pevent);
 
        for (i = 0; i < systems; i++) {
                sys = read_string();
 
-               count = read4();
+               count = read4(pevent);
                for (x=0; x < count; x++) {
-                       size = read8();
-                       read_event_file(sys, size);
+                       size = read8(pevent);
+                       read_event_file(pevent, sys, size);
                }
        }
 }
@@ -377,7 +378,7 @@ static int calc_index(void *ptr, int cpu)
        return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
 }
 
-struct pevent_record *trace_peek_data(int cpu)
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu)
 {
        struct pevent_record *data;
        void *page = cpu_data[cpu].page;
@@ -399,15 +400,15 @@ struct pevent_record *trace_peek_data(int cpu)
                /* FIXME: handle header page */
                if (header_page_ts_size != 8)
                        die("expected a long long type for timestamp");
-               cpu_data[cpu].timestamp = data2host8(perf_pevent, ptr);
+               cpu_data[cpu].timestamp = data2host8(pevent, ptr);
                ptr += 8;
                switch (header_page_size_size) {
                case 4:
-                       cpu_data[cpu].page_size = data2host4(perf_pevent, ptr);
+                       cpu_data[cpu].page_size = data2host4(pevent, ptr);
                        ptr += 4;
                        break;
                case 8:
-                       cpu_data[cpu].page_size = data2host8(perf_pevent, ptr);
+                       cpu_data[cpu].page_size = data2host8(pevent, ptr);
                        ptr += 8;
                        break;
                default:
@@ -421,10 +422,10 @@ read_again:
 
        if (idx >= cpu_data[cpu].page_size) {
                get_next_page(cpu);
-               return trace_peek_data(cpu);
+               return trace_peek_data(pevent, cpu);
        }
 
-       type_len_ts = data2host4(perf_pevent, ptr);
+       type_len_ts = data2host4(pevent, ptr);
        ptr += 4;
 
        type_len = type_len4host(type_len_ts);
@@ -434,14 +435,14 @@ read_again:
        case RINGBUF_TYPE_PADDING:
                if (!delta)
                        die("error, hit unexpected end of page");
-               length = data2host4(perf_pevent, ptr);
+               length = data2host4(pevent, ptr);
                ptr += 4;
                length *= 4;
                ptr += length;
                goto read_again;
 
        case RINGBUF_TYPE_TIME_EXTEND:
-               extend = data2host4(perf_pevent, ptr);
+               extend = data2host4(pevent, ptr);
                ptr += 4;
                extend <<= TS_SHIFT;
                extend += delta;
@@ -452,7 +453,7 @@ read_again:
                ptr += 12;
                break;
        case 0:
-               length = data2host4(perf_pevent, ptr);
+               length = data2host4(pevent, ptr);
                ptr += 4;
                die("here! length=%d", length);
                break;
@@ -477,17 +478,17 @@ read_again:
        return data;
 }
 
-struct pevent_record *trace_read_data(int cpu)
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu)
 {
        struct pevent_record *data;
 
-       data = trace_peek_data(cpu);
+       data = trace_peek_data(pevent, cpu);
        cpu_data[cpu].next = NULL;
 
        return data;
 }
 
-ssize_t trace_report(int fd, bool __repipe)
+ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
 {
        char buf[BUFSIZ];
        char test[] = { 23, 8, 68 };
@@ -519,30 +520,32 @@ ssize_t trace_report(int fd, bool __repipe)
        file_bigendian = buf[0];
        host_bigendian = bigendian();
 
-       read_trace_init(file_bigendian, host_bigendian);
+       *ppevent = read_trace_init(file_bigendian, host_bigendian);
+       if (*ppevent == NULL)
+               die("read_trace_init failed");
 
        read_or_die(buf, 1);
        long_size = buf[0];
 
-       page_size = read4();
+       page_size = read4(*ppevent);
 
-       read_header_files();
+       read_header_files(*ppevent);
 
-       read_ftrace_files();
-       read_event_files();
-       read_proc_kallsyms();
-       read_ftrace_printk();
+       read_ftrace_files(*ppevent);
+       read_event_files(*ppevent);
+       read_proc_kallsyms(*ppevent);
+       read_ftrace_printk(*ppevent);
 
        size = calc_data_size - 1;
        calc_data_size = 0;
        repipe = false;
 
        if (show_funcs) {
-               pevent_print_funcs(perf_pevent);
+               pevent_print_funcs(*ppevent);
                return size;
        }
        if (show_printk) {
-               pevent_print_printk(perf_pevent);
+               pevent_print_printk(*ppevent);
                return size;
        }
 
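A caller-side sketch under assumed names: read_tracing_session() and nr_cpus are illustrative, not part of this commit. It shows the new contract visible in the hunk above: trace_report() creates the pevent, hands it back through ppevent, and every later reader must be given that handle explicitly instead of touching a global.

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/types.h>

	struct pevent;
	struct pevent_record;

	/* prototypes as changed by this commit */
	ssize_t trace_report(int fd, struct pevent **ppevent, bool repipe);
	struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);

	static int read_tracing_session(int fd, int nr_cpus)
	{
		struct pevent *pevent = NULL;
		ssize_t size;
		int cpu;

		/* trace_report() now hands back the pevent it created ... */
		size = trace_report(fd, &pevent, false);
		if (size < 0 || pevent == NULL)
			return -1;

		/* ... and every subsequent reader takes it as an argument */
		for (cpu = 0; cpu < nr_cpus; cpu++) {
			struct pevent_record *rec = trace_read_data(pevent, cpu);

			if (rec == NULL)
				continue;
			/* decode rec with the same pevent handle ... */
		}
		return 0;
	}
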
index 18ae6c1831d3896263c5a7c3ff58f264bcefbfa5..474aa7a7df43e178717c7808c2ee279c4d026fb2 100644 (file)
@@ -36,6 +36,7 @@ static int stop_script_unsupported(void)
 }
 
 static void process_event_unsupported(union perf_event *event __unused,
+                                     struct pevent *pevent __unused,
                                      struct perf_sample *sample __unused,
                                      struct perf_evsel *evsel __unused,
                                      struct machine *machine __unused,
@@ -61,7 +62,8 @@ static int python_start_script_unsupported(const char *script __unused,
        return -1;
 }
 
-static int python_generate_script_unsupported(const char *outfile __unused)
+static int python_generate_script_unsupported(struct pevent *pevent __unused,
+                                             const char *outfile __unused)
 {
        print_python_unsupported_msg();
 
@@ -122,7 +124,8 @@ static int perl_start_script_unsupported(const char *script __unused,
        return -1;
 }
 
-static int perl_generate_script_unsupported(const char *outfile __unused)
+static int perl_generate_script_unsupported(struct pevent *pevent __unused,
+                                           const char *outfile __unused)
 {
        print_perl_unsupported_msg();
 
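Illustrative only: the "unsupported" stubs above are the fallbacks built when perl/python support is compiled out, so they must track the widened callback signatures. The sketch below shows dispatch through such a callback now forwarding the pevent handle; generate_script_for() and the "_sketch" struct are assumptions, though the member signature matches the scripting_ops change later in this diff.

	struct pevent;

	struct scripting_ops_sketch {
		int (*generate_script)(struct pevent *pevent, const char *outfile);
	};

	static int generate_script_for(struct scripting_ops_sketch *ops,
				       struct pevent *pevent, const char *outfile)
	{
		/* callers can no longer rely on a global; pass the handle down */
		return ops->generate_script(pevent, outfile);
	}
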
index 639852ac1117109849adc8386c66efbf0ccba24f..8fef1d6687b73250a24e682b4c41a9fb29384421 100644 (file)
@@ -8,6 +8,7 @@
 struct machine;
 struct perf_sample;
 union perf_event;
+struct perf_tool;
 struct thread;
 
 extern int header_page_size_size;
@@ -29,35 +30,36 @@ enum {
 
 int bigendian(void);
 
-int read_trace_init(int file_bigendian, int host_bigendian);
-void print_trace_event(int cpu, void *data, int size);
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
 
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
-                 char *comm);
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+                unsigned long long nsecs, char *comm);
 
-int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *sys);
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
+int parse_event_file(struct pevent *pevent,
+                    char *buf, unsigned long size, char *sys);
 
-struct pevent_record *trace_peek_data(int cpu);
-struct event_format *trace_find_event(int type);
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu);
 
 unsigned long long
 raw_field_value(struct event_format *event, const char *name, void *data);
 void *raw_field_ptr(struct event_format *event, const char *name, void *data);
 
-void parse_proc_kallsyms(char *file, unsigned int size __unused);
-void parse_ftrace_printk(char *file, unsigned int size __unused);
+void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
+void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
 
-ssize_t trace_report(int fd, bool repipe);
+ssize_t trace_report(int fd, struct pevent **pevent, bool repipe);
 
-int trace_parse_common_type(void *data);
-int trace_parse_common_pid(void *data);
+int trace_parse_common_type(struct pevent *pevent, void *data);
+int trace_parse_common_pid(struct pevent *pevent, void *data);
 
-struct event_format *trace_find_next_event(struct event_format *event);
-unsigned long long read_size(void *ptr, int size);
+struct event_format *trace_find_next_event(struct pevent *pevent,
+                                          struct event_format *event);
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size);
 unsigned long long eval_flag(const char *flag);
 
-struct pevent_record *trace_read_data(int cpu);
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);
 int read_tracing_data(int fd, struct list_head *pattrs);
 
 struct tracing_data {
@@ -77,11 +79,12 @@ struct scripting_ops {
        int (*start_script) (const char *script, int argc, const char **argv);
        int (*stop_script) (void);
        void (*process_event) (union perf_event *event,
+                              struct pevent *pevent,
                               struct perf_sample *sample,
                               struct perf_evsel *evsel,
                               struct machine *machine,
                               struct thread *thread);
-       int (*generate_script) (const char *outfile);
+       int (*generate_script) (struct pevent *pevent, const char *outfile);
 };
 
 int script_spec_register(const char *spec, struct scripting_ops *ops);
@@ -90,6 +93,7 @@ void setup_perl_scripting(void);
 void setup_python_scripting(void);
 
 struct scripting_context {
+       struct pevent *pevent;
        void *event_data;
 };
 
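A hedged sketch of how a script backend can use the pevent field just added to scripting_context: resolve common event fields through the context's own handle rather than through perf_pevent. common_pid_sketch() is a stand-in for the helpers perf's perl/python glue actually exports; the struct mirrors the declaration above.

	struct pevent;

	/* prototype as changed by this commit */
	int trace_parse_common_pid(struct pevent *pevent, void *data);

	struct scripting_context_sketch {
		struct pevent *pevent;
		void *event_data;
	};

	static int common_pid_sketch(struct scripting_context_sketch *ctx)
	{
		/* the context carries everything needed -- no global state */
		return trace_parse_common_pid(ctx->pevent, ctx->event_data);
	}
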
index 2daaedb83d8425c5e87077f05f2712d865b49368..b13c7331eaf8cbf89a8a1b6626238e44ab84d657 100644 (file)
@@ -264,4 +264,6 @@ bool is_power_of_2(unsigned long n)
 
 size_t hex_width(u64 v);
 
+char *rtrim(char *s);
+
 #endif
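
The final hunk only adds the rtrim() declaration; its body is not part of this diff. A plausible implementation sketch, assuming the conventional semantics (strip trailing whitespace in place, return the same buffer):

	#include <ctype.h>
	#include <string.h>

	char *rtrim(char *s)
	{
		size_t size = strlen(s);
		char *end;

		if (!size)
			return s;

		/* walk back over trailing whitespace */
		end = s + size - 1;
		while (end >= s && isspace((unsigned char)*end))
			end--;

		*(end + 1) = '\0';	/* terminate after the last non-space */
		return s;
	}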