Merge branches 'iommu/fixes', 'arm/omap', 'arm/smmu', 'arm/shmobile', 'x86/amd',...
author Joerg Roedel <jroedel@suse.de>
Fri, 30 May 2014 18:22:10 +0000 (20:22 +0200)
committer Joerg Roedel <jroedel@suse.de>
Fri, 30 May 2014 18:22:10 +0000 (20:22 +0200)
194 files changed:
Documentation/DocBook/drm.tmpl
Documentation/DocBook/media/Makefile
Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/email-clients.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/sysfs-interface
Documentation/java.txt
Documentation/networking/filter.txt
Documentation/networking/packet_mmap.txt
MAINTAINERS
Makefile
arch/parisc/kernel/syscall_table.S
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/sysfs.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/tsb.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/net/bpf_jit_comp.c
drivers/dma/dmaengine.c
drivers/dma/mv_xor.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/sid.h
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/fsl_pamu.c
drivers/iommu/msm_iommu_dev.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iopgtable.h
drivers/iommu/shmobile-ipmmu.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bonding.h
drivers/net/can/c_can/Kconfig
drivers/net/can/c_can/c_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/altera/Makefile
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/altera/altera_utils.c
drivers/net/ethernet/altera/altera_utils.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_mbim.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/ptp/Kconfig
drivers/scsi/scsi_transport_sas.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4state.c
fs/ocfs2/dlm/dlmmaster.c
include/linux/dmaengine.h
include/linux/if_macvlan.h
include/linux/if_vlan.h
include/linux/mlx4/qp.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of_mdio.h
include/linux/perf_event.h
include/linux/rtnetlink.h
include/linux/sched.h
include/net/cfg80211.h
include/net/ip6_route.h
include/net/netns/ipv4.h
include/uapi/linux/nl80211.h
kernel/events/core.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
mm/filemap.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/bridge/br_netfilter.c
net/core/dev.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_protocol.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/tcpv6_offload.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_protocol.c
net/iucv/af_iucv.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/util.c
net/mac80211/vht.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink.c
net/rxrpc/ar-key.c
net/sched/cls_tcindex.c
net/wireless/scan.c
net/wireless/sme.c
tools/Makefile
tools/lib/lockdep/Makefile

index 677a02553ec0cd772c1a86c73b7d314f48376190..ba60d93c18551af17db0e1bcbca7670662c025f9 100644 (file)
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
       providing a solution to every graphics memory-related problems, GEM
       identified common code between drivers and created a support library to
       share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
       UMA devices.
     </para>
     <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
            to manage the PRIME file descriptors. Similar to the mode setting
            API PRIME is agnostic to the underlying buffer object manager, as
-           long as handles are 32bit unsinged integers.
+           long as handles are 32bit unsigned integers.
          </para>
          <para>
            While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
       first create properties and then create and associate individual instances
       of those properties to objects. A property can be instantiated multiple
       times and associated with different objects. Values are stored in property
-      instances, and all other property information are stored in the propery
+      instances, and all other property information are stored in the property
       and shared between all instances of the property.
     </para>
     <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
       is only used by old DRM drivers which have done a so-called shadow-attach
       to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
       submission code. Do not use any of this in new and modern drivers.
     </para>
 
index f9fd615427fbd4c0a718d66ed1b7de006ab796e6..1d27f0a1abd1e1872b0e05693ab35d6ecd64b0f2 100644 (file)
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-       $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+       $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
        $(Q)base64 -d $< >$@
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
new file mode 100644 (file)
index 0000000..6fa4c73
--- /dev/null
@@ -0,0 +1,70 @@
+Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
+
+Samsung's Exynos architecture contains System MMUs that enable scattered
+physical memory chunks to appear as a contiguous region to DMA-capable
+peripheral devices such as MFC, FIMC, FIMD, GScaler and FIMC-IS.
+
+The System MMU is an IOMMU that supports the same translation table format as
+ARMv7 translation tables, with a minimal set of page properties including
+access permissions, shareability and security protection. In addition, the
+System MMU has further capabilities, such as an L2 TLB and block-fetch
+buffers, to minimize translation latency.
+
+System MMUs are in a many-to-one relation with peripheral devices: a single
+peripheral device may have multiple System MMUs (usually one for each bus
+master), but one System MMU can handle transactions from only one peripheral
+device. The relation between a System MMU and its peripheral device needs to
+be defined in the device node of the peripheral device.
+
+MFC in all Exynos SoCs, and FIMD, the M2M Scalers and G2D in Exynos5420, have
+2 System MMUs each.
+* MFC has one System MMU on each of its left and right buses.
+* FIMD in Exynos5420 has one System MMU for windows 0 and 4, and the other
+  System MMU for windows 1, 2 and 3.
+* The M2M Scalers and G2D in Exynos5420 have one System MMU on the read
+  channel and the other System MMU on the write channel.
+The drivers must consider how to handle those System MMUs. One idea is to
+implement child devices or sub-devices which are the client devices of the
+System MMU.
+
+Note:
+The current DT binding for the Exynos System MMU is incomplete.
+The following properties can be removed or changed, if found incompatible with
+the "Generic IOMMU Binding" support for attaching devices to the IOMMU.
+
+Required properties:
+- compatible: Should be "samsung,exynos-sysmmu"
+- reg: A tuple of base address and size of System MMU registers.
+- interrupt-parent: The phandle of the interrupt controller of System MMU
+- interrupts: An interrupt specifier for interrupt signal of System MMU,
+             according to the format defined by a particular interrupt
+             controller.
+- clock-names: Should be "sysmmu" if the System MMU needs to gate its clock.
+              Optional "master" if the clock to the System MMU is gated by
+              a gate clock other than "sysmmu".
+              On Exynos4 SoCs, no "master" clock is needed.
+              On Exynos5 SoCs, some System MMUs must have "master" clocks.
+- clocks: Required if the System MMU needs to gate its clock.
+- samsung,power-domain: Required if the System MMU needs to gate its power.
+         Please refer to the following document:
+         Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+
+Examples:
+       gsc_0: gsc@13e00000 {
+               compatible = "samsung,exynos5-gsc";
+               reg = <0x13e00000 0x1000>;
+               interrupts = <0 85 0>;
+               samsung,power-domain = <&pd_gsc>;
+               clocks = <&clock CLK_GSCL0>;
+               clock-names = "gscl";
+       };
+
+       sysmmu_gsc0: sysmmu@13E80000 {
+               compatible = "samsung,exynos-sysmmu";
+               reg = <0x13E80000 0x1000>;
+               interrupt-parent = <&combiner>;
+               interrupts = <2 0>;
+               clock-names = "sysmmu", "master";
+               clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
+               samsung,power-domain = <&pd_gsc>;
+       };
index c79bab025369af4bb6320ba0e90f7fb386942cc1..8dbcf8295c6c9ceaa4eb7518694acf3b09095dc1 100644 (file)
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
index e9f5daccbd02e2d8e97b489faf372dbb307529d8..4e30ebaa9e5b2652950b2383817e852d2e2239e0 100644 (file)
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
index 8b9cd8eb3f917e63074ba0ffae0e83b767a87176..264bcde0c51c55629066e9809af3bab99fe05c8c 100644 (file)
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
-interrupts serviced; each  subsequent column is the  total for that particular
-interrupt.
+interrupts serviced  including  unnumbered  architecture specific  interrupts;
+each  subsequent column is the  total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
index 79f8257dd790703f8a9285e7b498f585df87e97b..2cc95ad466047b055d45f748619cf2d7231d5e93 100644 (file)
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
                from the max value.
                RW
 
+temp[1-*]_min_hyst
+               Temperature hysteresis value for min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the min value.
+               RW
+
 temp[1-*]_input Temperature input value.
                Unit: millidegree Celsius
                RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit    Temperature critical min value, typically lower than
                Unit: millidegree Celsius
                RW
 
+temp[1-*]_lcrit_hyst
+               Temperature hysteresis value for critical min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the critical min value.
+               RW
+
 temp[1-*]_offset
                Temperature offset which is added to the temperature reading
                by the chip.
index e6a72328154783fc6e983904658eb8eb8c0c4ee8..418020584ccc171b8ff079e496e73383f0f55c29 100644 (file)
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
                break;
        case CP_CLASS:
        case CP_STRING:
+       case CP_METHODTYPE:
                seekerr = fseek(classfile, 2, SEEK_CUR);
                break;
+       case CP_METHODHANDLE:
+               seekerr = fseek(classfile, 3, SEEK_CUR);
+               break;
        case CP_INTEGER:
        case CP_FLOAT:
        case CP_FIELDREF:
        case CP_METHODREF:
        case CP_INTERFACEMETHODREF:
        case CP_NAMEANDTYPE:
+       case CP_INVOKEDYNAMIC:
                seekerr = fseek(classfile, 4, SEEK_CUR);
                break;
        case CP_LONG:
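
For reference, the three constant-pool tags added in the hunk above carry fixed-size payloads per the JVM class file format, which is what the fseek() distances encode. A minimal C sketch (hypothetical helper, not part of the patch) of the same sizes:

	/* Payload bytes following each tag, per the class file format;
	 * mirrors the fseek() distances in skip_constant() above. */
	static int cp_payload_size(int tag)
	{
		switch (tag) {
		case 15: return 3;	/* CONSTANT_MethodHandle: u1 kind + u2 index */
		case 16: return 2;	/* CONSTANT_MethodType: u2 descriptor index */
		case 18: return 4;	/* CONSTANT_InvokeDynamic: two u2 indices */
		default: return -1;	/* other tags handled separately */
		}
	}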
index 81f940f4e88480d48c35fd7707d679d646ef0af8..e3ba753cb714949c4e26f898da31b6d0e6b03737 100644 (file)
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
index 6fea79efb4cbfd31cc1d0155aef320b94b89da9e..38112d512f47db9ec9a70edae7c7df83b7d13119 100644 (file)
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
index 6846c7c622e339699b949edc56b2388a5db25ff3..cc4511177949e3539946f441166586e50212ac2c 100644 (file)
@@ -537,7 +537,7 @@ L:  linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridgers2013@gmail.com
+M:     Vince Bridgers <vbridgers2013@gmail.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -6514,10 +6514,10 @@ T:      git git://openrisc.net/~jonas/linux
 F:     arch/openrisc/
 
 OPENVSWITCH
-M:     Jesse Gross <jesse@nicira.com>
+M:     Pravin Shelar <pshelar@nicira.com>
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
 
index 9d993787afe08acfe8ef397d7fc80a2fb357dd47..cf3412d78ff139c99458bf923872e7a57440853e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index f1432da7b4c0f015ff40d695a768c8f549839051..c5fa7a697fba2a13b0779e59904ec7c1428e8ef4 100644 (file)
        ENTRY_SAME(sched_setattr)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
-       ENTRY_COMP(renameat2)
+       ENTRY_SAME(renameat2)
 
        /* Nothing yet */
 
index 452d3ebd9d0fba3b513a6978b096bd22b27c1b46..e9f8fa9337fe1fae0d5d0ba9bf03a8426961a3c0 100644 (file)
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (bpfsize + sizeof(*header));
+       hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
index fde5abaac0ccbf6ee8c9d7f3e198d2b93e1fd384..1a49ffdf9da91056cb24357b6fdefea772658201 100644 (file)
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
index a364000ca1aa8a495f7e8b9bf59882350f41c6de..7f41d40b7e6e8ccf89b5ce12a9422bbf4e84ac2e 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                        size_t count)
 {
        unsigned long val, err;
-       int ret = sscanf(buf, "%ld", &val);
+       int ret = sscanf(buf, "%lu", &val);
 
        if (ret != 1)
                return -EINVAL;
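
The one-character fix above matters because a "%ld" conversion with an unsigned long argument is undefined behaviour in C and mis-parses values above LONG_MAX. A minimal standalone sketch of the corrected usage:

	#include <stdio.h>

	int main(void)
	{
		unsigned long val;

		/* "%lu" matches the unsigned long target; "%ld" would not */
		if (sscanf("4294967295", "%lu", &val) == 1)
			printf("parsed %lu\n", val);
		return 0;
	}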
index 2c20ad63ddbf2bbf8a4da5e751e49650d8be7060..30eee6e8a81b2d45797aab304914b10571573b1a 100644 (file)
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index a8ff0d1a3b6999ab7c0f16de972d192a48a951ff..4ced3fc66130c30b8870c21cea57611056046d50 100644 (file)
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
index f5d506fdddad3dea459aa97308e9a8872f34ffe8..fe19b81acc091b4d994da81f5580fe4438664388 100644 (file)
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index aa333d9668866f808955209f8cd71737d447eafc..adb02aa62af5e310ff51ca720d1163b247489ccb 100644 (file)
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
index dc017735bb91b7b2ec61f333b091c63accdb921b..6d5663a599a7a362756b5bcb6d3ff3e30ee34bae 100644 (file)
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
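
The s390 and x86 hunks above apply the same cap: the JIT's free path later recovers the header by masking the image address down to a page boundary, so the randomized image start must stay inside the header's page. A standalone sketch of the capped computation (hypothetical types and sizes, not the kernel's allocator):

	#include <stddef.h>

	#define PAGE_SIZE_SKETCH 4096UL

	struct header_sketch {
		unsigned int pages;
		unsigned char image[];
	};

	static size_t capped_hole(size_t sz, size_t proglen)
	{
		size_t hdr = sizeof(struct header_sketch);
		size_t hole = sz - (proglen + hdr);

		/* keep the random image start in the header's page */
		if (hole > PAGE_SIZE_SKETCH - hdr)
			hole = PAGE_SIZE_SKETCH - hdr;
		return hole;
	}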
index a886713937fd05b38fc10866bbce5266ac0bfae1..d5d30ed863ceb904c6e19e245b94d7ddba40f88f 100644 (file)
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
+       cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
+       unmap->map_cnt = nr;
 
        return unmap;
 }
index 766b68ed505c4d2b3964bfb1f0de6ab5ae1ff3a9..394cbc5c93e3600a8c918bfb673756bdd279ce7b 100644 (file)
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-       u32 activation;
-
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-       activation = readl_relaxed(XOR_ACTIVATION(chan));
-       activation |= 0x1;
-       writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+       /* writel ensures all descriptors are flushed before activation */
+       writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
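
The change above is not just a simplification: writel() implies a write barrier before the MMIO store, while writel_relaxed() does not, so the plain writel() guarantees the engine observes the completed descriptors before the activation bit lands. A kernel-context sketch of the contract (illustrative, not the mv_xor driver):

	#include <linux/io.h>
	#include <linux/bitops.h>

	static void chan_activate_sketch(void __iomem *activation_reg)
	{
		/* descriptor writes to coherent memory happen before this;
		 * writel() orders them ahead of the doorbell store, roughly:
		 * wmb(); writel_relaxed(BIT(0), activation_reg); */
		writel(BIT(0), activation_reg);
	}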
index 7762665ad8fdb5b461680ee84b7b224b77a5d4d5..876de9ac3793fd30af193ac7e8410818bdb56e7a 100644 (file)
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
        }
 
        if (outp == 8)
-               return false;
+               return conf;
 
        data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
        if (data == 0x0000)
index 43fec17ea540b6c53af704931d4baf29d2391e4e..bbf117be572f4617cff0fd06907fb2a13977ae93 100644 (file)
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
+               case 0x1e: return 2;
                default:
                        break;
                }
index 68528619834a75eece9f3b6c6d59cd34a0b03f73..8149e7cf430330095da34f98d27f842832c2c91c 100644 (file)
@@ -1642,6 +1642,7 @@ struct radeon_vce {
        unsigned                fb_version;
        atomic_t                handles[RADEON_MAX_VCE_HANDLES];
        struct drm_file         *filp[RADEON_MAX_VCE_HANDLES];
+       unsigned                img_size[RADEON_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
 };
 
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+                            (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
                              (rdev->ddev->pdev->device == 0x6850) || \
index b3633d9a531703a1cd4189c061a0907e89ee1085..9ab30976287d4c27e0dc94e0cadfa993e6ec77cc 100644 (file)
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                }
        }
 
+       if (!found) {
+               while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+                       dhandle = ACPI_HANDLE(&pdev->dev);
+                       if (!dhandle)
+                               continue;
+
+                       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
        if (!found)
                return false;
 
index 408b6ac53f0b808c525f86b99e90922716f5e6dc..f00dbbf4d806511a86b034a73c2d68adac971b99 100644 (file)
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 
        /* avoid high jitter with small fractional dividers */
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
-               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
                if (fb_div < fb_div_min) {
                        unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
                        fb_div *= tmp;
index 0cc47f12d9957d916b41acf4d3874fa062313175..eaaedba0467595aaced591c6d70666c75ac97211 100644 (file)
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return r;
                }
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
-               }
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
 
-               /* map the ib pool buffer read only into
-                * virtual address space */
-               bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                        rdev->ring_tmp_bo.bo);
-               r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-                                         RADEON_VM_PAGE_READABLE |
-                                         RADEON_VM_PAGE_SNOOPED);
+                       /* map the ib pool buffer read only into
+                        * virtual address space */
+                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+                                                rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                                 RADEON_VM_PAGE_READABLE |
+                                                 RADEON_VM_PAGE_SNOOPED);
 
-               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
+                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
                }
-
                file_priv->driver_priv = fpriv;
        }
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                struct radeon_bo_va *bo_va;
                int r;
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (!r) {
-                       bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                 rdev->ring_tmp_bo.bo);
-                       if (bo_va)
-                               radeon_vm_bo_rmv(rdev, bo_va);
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (!r) {
+                               bo_va = radeon_vm_bo_find(&fpriv->vm,
+                                                         rdev->ring_tmp_bo.bo);
+                               if (bo_va)
+                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       }
                }
 
                radeon_vm_fini(rdev, &fpriv->vm);
index 19bec0dbfa38bf052db75cb52401adfcb29ac986..4faa4d6f9bb4f0616e0575d916069b47fa9389ed 100644 (file)
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                       if ((lobj->alt_domain & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.start << PAGE_SHIFT;
-               if ((offset + size) > rdev->mc.visible_vram_size) {
-                       /* hurrah the memory is not visible ! */
-                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
-                       if (unlikely(r != 0))
-                               return r;
-                       offset = bo->mem.start << PAGE_SHIFT;
-                       /* this should not happen */
-                       if ((offset + size) > rdev->mc.visible_vram_size)
-                               return -EINVAL;
-               }
+       if (bo->mem.mem_type != TTM_PL_VRAM)
+               return 0;
+
+       size = bo->mem.num_pages << PAGE_SHIFT;
+       offset = bo->mem.start << PAGE_SHIFT;
+       if ((offset + size) <= rdev->mc.visible_vram_size)
+               return 0;
+
+       /* hurrah the memory is not visible ! */
+       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       if (unlikely(r == -ENOMEM)) {
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               return ttm_bo_validate(bo, &rbo->placement, false, false);
+       } else if (unlikely(r != 0)) {
+               return r;
        }
+
+       offset = bo->mem.start << PAGE_SHIFT;
+       /* this should never happen */
+       if ((offset + size) > rdev->mc.visible_vram_size)
+               return -EINVAL;
+
        return 0;
 }
 
index f30b8426eee233b286e45ca0fea9a39de8b47ff0..53d6e1bb48dc326bb3487fde5f02749df56d4e57 100644 (file)
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set profile when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set method when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               count = -EINVAL;
+               goto fail;
+       }
+
        /* we don't support the legacy modes with dpm */
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
                count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set dpm state when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
        enum radeon_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (rdev->asic->pm.get_temperature)
                temp = radeon_get_temperature(rdev);
        else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_device *ddev = rdev->ddev;
 
-       if (rdev->pm.dpm_enabled) {
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (rdev->pm.dpm_enabled) {
                mutex_lock(&rdev->pm.mutex);
                if (rdev->asic->dpm.debugfs_print_current_performance_level)
                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
index f73324c81491fe52c472fdd8bab6558c347db2c3..3971d968af6c0d86d0ba6e6f08f94d26714e17ec 100644 (file)
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: minimum size of the relocation buffer
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                       unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       uint64_t offset;
+       struct radeon_cs_reloc *reloc;
+       uint64_t start, end, offset;
        unsigned idx;
 
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+       p->ib.ptr[lo] = start & 0xFFFFFFFF;
+       p->ib.ptr[hi] = start >> 32;
+
+       if (end <= start) {
+               DRM_ERROR("invalid reloc offset %llX!\n", offset);
+               return -EINVAL;
+       }
+       if ((end - start) < size) {
+               DRM_ERROR("buffer to small (%d / %d)!\n",
+                       (unsigned)(end - start), size);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+       unsigned i;
+
+       /* validate the handle */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+                       return i;
+       }
+
+       /* handle not found try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+                       p->rdev->vce.filp[i] = p->filp;
+                       p->rdev->vce.img_size[i] = 0;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
+       int session_idx = -1;
+       bool destroyed = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
        int i, r;
 
        while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       return -EINVAL;
+               }
+
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
+                       session_idx = radeon_vce_validate_handle(p, handle);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->rdev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       break;
+
                case 0x01000001: // create
+                       *size = radeon_get_ib_value(p, p->idx + 8) *
+                               radeon_get_ib_value(p, p->idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x03000001: // encode
-                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+                                               *size);
                        if (r)
                                return r;
 
-                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+                                               *size / 3);
                        if (r)
                                return r;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               *size * 2);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = radeon_get_ib_value(p, p->idx + 4);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               tmp);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               4096);
                        if (r)
                                return r;
                        break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       return -EINVAL;
+               }
+
                p->idx += len / 4;
        }
 
-       if (destroy) {
+       if (destroyed) {
                /* IB contains a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-               return 0;
-        }
-
-       /* create or encode, validate the handle */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-                       return 0;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-                       p->rdev->vce.filp[i] = p->filp;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free VCE handles!\n");
-       return -EINVAL;
+       return 0;
 }
 
 /**
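
The core of the VCE parser hardening above is a bounds check on every relocated buffer against its declared use. A standalone sketch of that check (illustrative types, not radeon's):

	#include <errno.h>
	#include <stdint.h>

	/* Returns 0 when [start, end) can hold 'size' bytes. */
	static int check_reloc_bounds(uint64_t start, uint64_t end,
				      unsigned int size)
	{
		if (end <= start)
			return -EINVAL;	/* offset points past the buffer */
		if (end - start < size)
			return -EINVAL;	/* buffer too small for declared use */
		return 0;
	}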
index 2aae6ce49d3286888ea22347b60d71cc5220eef4..d9ab99f47612743bb41361a8cca8114999e52fca 100644 (file)
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        ndw = 64;
 
        /* assume the worst case */
-       ndw += vm->max_pde_used * 12;
+       ndw += vm->max_pde_used * 16;
 
        /* update too big for an IB */
        if (ndw > 0xfffff)
index 683532f849311d1ca19de3daccb168b368d4c0b5..7321283602ce0c1d8429193efe4221ba26a33761 100644 (file)
 #define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
 #define                SPLL_SW_DIR_CONTROL                     (1 << 0)
-#      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
-#      define SPLL_REFCLK_SEL_MASK                     0xFF00
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
 
 #define        CG_SPLL_SPREAD_SPECTRUM                         0x620
 #define                SSEN                                    (1 << 0)
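
The register fix above corrects a shift/mask pair that had drifted apart; deriving the mask from the shift keeps the two consistent by construction. A sketch with illustrative names (not radeon's macros):

	#define SEL_SHIFT	26
	#define SEL_WIDTH	2
	#define SEL(x)		((x) << SEL_SHIFT)
	/* evaluates to (3 << 26), matching the corrected mask above */
	#define SEL_MASK	(((1U << SEL_WIDTH) - 1) << SEL_SHIFT)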
index bc196f49ec53be2e13959a9ebca9aa9c82edcfff..4af0da96c2e22a6d18474cf60e34649acc11167b 100644 (file)
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
        tristate "NTC thermistor support"
-       depends on (!OF && !IIO) || (OF && IIO)
+       depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
          interpretation. The driver can also monitor the temperature and
index 8a17f01e8672065d7099e4c3ae156f93705eade4..e76feb86a1d4c0b07ab773675ef1244b88ee9b98 100644 (file)
@@ -44,6 +44,7 @@ struct ntc_compensation {
        unsigned int    ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
        { "ncp15wb473", TYPE_NCPXXWB473 },
        { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
        char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
        struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
        { .compatible = "ntc,ncp15wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[1] },
        { .compatible = "ntc,ncp21wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[2] },
        { .compatible = "ntc,ncp03wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[3] },
        { .compatible = "ntc,ncp15wl333",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+               .data = &ntc_thermistor_id[4] },
        { },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        return NULL;
 }
 
+#define ntc_match      NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
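
The "#define ntc_match NULL" fallback above is the usual pattern for compiling out an of_device_id table while keeping its users buildable in both configurations. A hedged sketch of the shape (illustrative names, assumes kernel headers):

	#include <linux/of.h>
	#include <linux/mod_devicetable.h>

	#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
	static const struct of_device_id my_match[] = {
		{ .compatible = "vendor,device" },
		{ },
	};
	#else
	/* callers reference my_match unconditionally and get NULL */
	#define my_match NULL
	#endif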
index 1b6dbe156a3708692a743cd58fc3351d4b01e533..199c7896f08188ca40fa9cd30462f0631d387338 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC    ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+                              struct net_device *dev,
+                              int port)
+{
+       u64 new_smac = 0;
+       u64 release_mac = MLX4_IB_INVALID_MAC;
+       struct mlx4_ib_qp *qp;
+
+       read_lock(&dev_base_lock);
+       new_smac = mlx4_mac_to_u64(dev->dev_addr);
+       read_unlock(&dev_base_lock);
+
+       mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+       qp = ibdev->qp1_proxy[port - 1];
+       if (qp) {
+               int new_smac_index;
+               u64 old_smac = qp->pri.smac;
+               struct mlx4_update_qp_params update_params;
+
+               if (new_smac == old_smac)
+                       goto unlock;
+
+               new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+               if (new_smac_index < 0)
+                       goto unlock;
+
+               update_params.smac_index = new_smac_index;
+               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+                                  &update_params)) {
+                       release_mac = new_smac;
+                       goto unlock;
+               }
+
+               qp->pri.smac = new_smac;
+               qp->pri.smac_index = new_smac_index;
+
+               release_mac = old_smac;
+       }
+
+unlock:
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+       if (release_mac != MLX4_IB_INVALID_MAC)
+               mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
                                 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+                                struct net_device *dev,
+                                unsigned long event)
+
 {
        struct mlx4_ib_iboe *iboe;
+       int update_qps_port = -1;
        int port;
 
        iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
                }
                curr_master = iboe->masters[port - 1];
 
+               if (dev == iboe->netdevs[port - 1] &&
+                   (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+                    event == NETDEV_UP || event == NETDEV_CHANGE))
+                       update_qps_port = port;
+
                if (curr_netdev) {
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
        }
 
        spin_unlock(&iboe->lock);
+
+       if (update_qps_port > 0)
+               mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-       mlx4_ib_scan_netdevs(ibdev);
+       mlx4_ib_scan_netdevs(ibdev, dev, event);
 
        return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_map;
 
        for (i = 0; i < ibdev->num_ports; ++i) {
+               mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                for (i = 1 ; i <= ibdev->num_ports ; ++i)
                        reset_gid_table(ibdev, i);
                rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev);
+               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
                rtnl_unlock();
                mlx4_ib_init_gid_table(ibdev);
        }
index f589522fddfd9efa4e32fdd0a7e8f63e49a54927..66b0b7dbd9f41cac95cfb76b4e766249e1abd83b 100644 (file)
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
+       struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+       /* lock when destroying qp1_proxy and getting netdev events */
+       struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
 };
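
A brief sketch (not part of the patch) of the locking discipline these new fields imply: every writer of qp1_proxy[] takes the matching per-port mutex, and ports are 1-based while the arrays are 0-based:

/* Sketch: serialize qp1_proxy[] updates against netdev-event users. */
static void example_set_qp1_proxy(struct mlx4_ib_dev *dev, int port,
				  struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->qp1_proxy_lock[port - 1]);
	dev->qp1_proxy[port - 1] = qp;	/* qp, or NULL on destroy */
	mutex_unlock(&dev->qp1_proxy_lock[port - 1]);
}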
 
 struct ib_event_work {
index 41308af4163c3dc852adc23f983bdd86d279374e..dc57482ae7af2b2ed8935676a67b459847b3302c 100644 (file)
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+       if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+               mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+               dev->qp1_proxy[mqp->port - 1] = NULL;
+               mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+       }
+
        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
                                if (err)
                                        return -EINVAL;
+                               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+                                       dev->qp1_proxy[qp->port - 1] = qp;
                        }
                }
        }
index a22b537caacd88f8794094c32f8be6e6e0bc24b0..d260605e6d5fb1b0cb6ca30c5d7e960ab43b4999 100644 (file)
@@ -178,13 +178,13 @@ config TEGRA_IOMMU_SMMU
 
 config EXYNOS_IOMMU
        bool "Exynos IOMMU Support"
-       depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+       depends on ARCH_EXYNOS
        select IOMMU_API
        help
-         Support for the IOMMU(System MMU) of Samsung Exynos application
-         processor family. This enables H/W multimedia accellerators to see
-         non-linear physical memory chunks as linear memory in their
-         address spaces
+         Support for the IOMMU (System MMU) of Samsung Exynos application
+         processor family. This enables H/W multimedia accelerators to see
+         non-linear physical memory chunks as linear memory in their
+         address space.
 
          If unsure, say N here.
 
@@ -193,9 +193,9 @@ config EXYNOS_IOMMU_DEBUG
        depends on EXYNOS_IOMMU
        help
          Select this to see the detailed log message that shows what
-         happens in the IOMMU driver
+         happens in the IOMMU driver.
 
-         Say N unless you need kernel log message for IOMMU debugging
+         Say N unless you need kernel log message for IOMMU debugging.
 
 config SHMOBILE_IPMMU
        bool
index 57068e8035b5b2553141cb8f1a6ad27f334ea6ec..4aec6a29e316b56b2be4ef2a48bfed8bcd05ea9d 100644 (file)
@@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void)
 {
        struct iommu_dev_data *dev_data;
        struct pci_dev *dev = NULL;
-       struct amd_iommu *iommu;
-       u16 devid;
        int ret;
 
        ret = alloc_passthrough_domain();
@@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void)
                dev_data = get_dev_data(&dev->dev);
                dev_data->passthrough = true;
 
-               devid = get_device_id(&dev->dev);
-
-               iommu = amd_iommu_rlookup_table[devid];
-               if (!iommu)
-                       continue;
-
                attach_device(&dev->dev, pt_domain);
        }
 
index 203b2e6a91cfca1184eb37df8761b339bc2a3728..d4daa05efe60d6d51edfe653793c77eecdce6c4f 100644 (file)
@@ -45,6 +45,8 @@ struct pri_queue {
 struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
+       atomic_t mmu_notifier_count;            /* Counting nested mmu_notifier
+                                                  calls */
        struct task_struct *task;               /* Task bound to this PASID */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_notifier handle */
@@ -56,6 +58,8 @@ struct pasid_state {
 };
 
 struct device_state {
+       struct list_head list;
+       u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
@@ -81,13 +85,9 @@ struct fault {
        u16 flags;
 };
 
-static struct device_state **state_table;
+static LIST_HEAD(state_list);
 static spinlock_t state_lock;
 
-/* List and lock for all pasid_states */
-static LIST_HEAD(pasid_state_list);
-static DEFINE_SPINLOCK(ps_lock);
-
 static struct workqueue_struct *iommu_wq;
 
 /*
@@ -99,7 +99,6 @@ static u64 *empty_page_table;
 
 static void free_pasid_states(struct device_state *dev_state);
 static void unbind_pasid(struct device_state *dev_state, int pasid);
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
 
 static u16 device_id(struct pci_dev *pdev)
 {
@@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev)
        return devid;
 }
 
+static struct device_state *__get_device_state(u16 devid)
+{
+       struct device_state *dev_state;
+
+       list_for_each_entry(dev_state, &state_list, list) {
+               if (dev_state->devid == devid)
+                       return dev_state;
+       }
+
+       return NULL;
+}
+
 static struct device_state *get_device_state(u16 devid)
 {
        struct device_state *dev_state;
        unsigned long flags;
 
        spin_lock_irqsave(&state_lock, flags);
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);
@@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state)
        free_device_state(dev_state);
 }
 
-static struct notifier_block profile_nb = {
-       .notifier_call = task_exit,
-};
-
-static void link_pasid_state(struct pasid_state *pasid_state)
-{
-       spin_lock(&ps_lock);
-       list_add_tail(&pasid_state->list, &pasid_state_list);
-       spin_unlock(&ps_lock);
-}
-
-static void __unlink_pasid_state(struct pasid_state *pasid_state)
-{
-       list_del(&pasid_state->list);
-}
-
-static void unlink_pasid_state(struct pasid_state *pasid_state)
-{
-       spin_lock(&ps_lock);
-       __unlink_pasid_state(pasid_state);
-       spin_unlock(&ps_lock);
-}
-
 /* Must be called under dev_state->lock */
 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  int pasid, bool alloc)
@@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid)
        if (pasid_state == NULL)
                return;
 
-       unlink_pasid_state(pasid_state);
        __unbind_pasid(pasid_state);
        put_pasid_state_wait(pasid_state); /* Reference taken in this function */
 }
@@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state)
                        continue;
 
                put_pasid_state(pasid_state);
-               unbind_pasid(dev_state, i);
+
+               /*
+                * This will call the mn_release function and
+                * unbind the PASID
+                */
+               mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
        }
 
        if (dev_state->pasid_levels == 2)
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(empty_page_table));
+       if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(empty_page_table));
+       }
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(pasid_state->mm->pgd));
+       if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(pasid_state->mm->pgd));
+       }
+}
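
Because invalidate_range_start/end pairs may nest, only the outermost pair must swap the GCR3 page table; a minimal standalone sketch of the counting pattern used above (names hypothetical):

#include <linux/atomic.h>

static atomic_t example_nest_count = ATOMIC_INIT(0);

static void example_range_start(void)
{
	/* Only the first, outermost start installs the empty page table */
	if (atomic_add_return(1, &example_nest_count) == 1)
		; /* e.g. point GCR3 at __pa(empty_page_table) */
}

static void example_range_end(void)
{
	/* Only the matching outermost end restores the real page table */
	if (atomic_dec_and_test(&example_nest_count))
		; /* e.g. point GCR3 back at __pa(mm->pgd) */
}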
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       might_sleep();
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
+       if (pasid_state->device_state->inv_ctx_cb)
+               dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+       unbind_pasid(dev_state, pasid_state->pasid);
 }
 
 static struct mmu_notifier_ops iommu_mn = {
+       .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
        .change_pte             = mn_change_pte,
        .invalidate_page        = mn_invalidate_page,
@@ -606,53 +621,6 @@ static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
 };
 
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
-{
-       struct pasid_state *pasid_state;
-       struct task_struct *task;
-
-       task = data;
-
-       /*
-        * Using this notifier is a hack - but there is no other choice
-        * at the moment. What I really want is a sleeping notifier that
-        * is called when an MM goes down. But such a notifier doesn't
-        * exist yet. The notifier needs to sleep because it has to make
-        * sure that the device does not use the PASID and the address
-        * space anymore before it is destroyed. This includes waiting
-        * for pending PRI requests to pass the workqueue. The
-        * MMU-Notifiers would be a good fit, but they use RCU and so
-        * they are not allowed to sleep. Lets see how we can solve this
-        * in a more intelligent way in the future.
-        */
-again:
-       spin_lock(&ps_lock);
-       list_for_each_entry(pasid_state, &pasid_state_list, list) {
-               struct device_state *dev_state;
-               int pasid;
-
-               if (pasid_state->task != task)
-                       continue;
-
-               /* Drop Lock and unbind */
-               spin_unlock(&ps_lock);
-
-               dev_state = pasid_state->device_state;
-               pasid     = pasid_state->pasid;
-
-               if (pasid_state->device_state->inv_ctx_cb)
-                       dev_state->inv_ctx_cb(dev_state->pdev, pasid);
-
-               unbind_pasid(dev_state, pasid);
-
-               /* Task may be in the list multiple times */
-               goto again;
-       }
-       spin_unlock(&ps_lock);
-
-       return NOTIFY_OK;
-}
-
 int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                         struct task_struct *task)
 {
@@ -682,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                goto out;
 
        atomic_set(&pasid_state->count, 1);
+       atomic_set(&pasid_state->mmu_notifier_count, 0);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);
 
@@ -705,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
        if (ret)
                goto out_clear_state;
 
-       link_pasid_state(pasid_state);
-
        return 0;
 
 out_clear_state:
@@ -727,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid);
 
 void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
 {
+       struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;
 
@@ -743,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;
 
-       unbind_pasid(dev_state, pasid);
+       pasid_state = get_pasid_state(dev_state, pasid);
+       if (pasid_state == NULL)
+               goto out;
+       /*
+        * Drop the reference taken above. This is safe because we still
+        * hold the reference taken in amd_iommu_bind_pasid().
+        */
+       put_pasid_state(pasid_state);
+
+       /* This will call the mn_release function and unbind the PASID */
+       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
 
 out:
        put_device_state(dev_state);
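
Tracing the reference counts makes the comment above concrete (as implied by the surrounding code):

/*
 * pasid_state reference lifecycle assumed above:
 *   bind:    count = 1                  (amd_iommu_bind_pasid)
 *   lookup:  get_pasid_state()  -> 2    (this function)
 *   drop:    put_pasid_state()  -> 1    (safe: bind reference still held)
 *   release: mmu_notifier_unregister() -> mn_release() -> unbind_pasid()
 *            -> final put -> 0, state freed
 */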
@@ -773,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
-       dev_state->pdev = pdev;
+       dev_state->pdev  = pdev;
+       dev_state->devid = devid;
 
        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -803,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        spin_lock_irqsave(&state_lock, flags);
 
-       if (state_table[devid] != NULL) {
+       if (__get_device_state(devid) != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }
 
-       state_table[devid] = dev_state;
+       list_add_tail(&dev_state->list, &state_list);
 
        spin_unlock_irqrestore(&state_lock, flags);
 
@@ -841,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
 
        spin_lock_irqsave(&state_lock, flags);
 
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }
 
-       state_table[devid] = NULL;
+       list_del(&dev_state->list);
 
        spin_unlock_irqrestore(&state_lock, flags);
 
@@ -874,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
        spin_lock_irqsave(&state_lock, flags);
 
        ret = -EINVAL;
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;
 
@@ -905,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
        spin_lock_irqsave(&state_lock, flags);
 
        ret = -EINVAL;
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;
 
@@ -922,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
 
 static int __init amd_iommu_v2_init(void)
 {
-       size_t state_table_size;
        int ret;
 
        pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
@@ -938,16 +916,10 @@ static int __init amd_iommu_v2_init(void)
 
        spin_lock_init(&state_lock);
 
-       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-       state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                              get_order(state_table_size));
-       if (state_table == NULL)
-               return -ENOMEM;
-
        ret = -ENOMEM;
        iommu_wq = create_workqueue("amd_iommu_v2");
        if (iommu_wq == NULL)
-               goto out_free;
+               goto out;
 
        ret = -ENOMEM;
        empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
@@ -955,29 +927,24 @@ static int __init amd_iommu_v2_init(void)
                goto out_destroy_wq;
 
        amd_iommu_register_ppr_notifier(&ppr_nb);
-       profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
 
        return 0;
 
 out_destroy_wq:
        destroy_workqueue(iommu_wq);
 
-out_free:
-       free_pages((unsigned long)state_table, get_order(state_table_size));
-
+out:
        return ret;
 }
 
 static void __exit amd_iommu_v2_exit(void)
 {
        struct device_state *dev_state;
-       size_t state_table_size;
        int i;
 
        if (!amd_iommu_v2_supported())
                return;
 
-       profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
        amd_iommu_unregister_ppr_notifier(&ppr_nb);
 
        flush_workqueue(iommu_wq);
@@ -1000,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void)
 
        destroy_workqueue(iommu_wq);
 
-       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-       free_pages((unsigned long)state_table, get_order(state_table_size));
-
        free_page((unsigned long)empty_page_table);
 }
 
index 647c3c7fd7428f31dd2b313a1fc8b29ac33b482c..1599354e974d200555218509ba87ed4a65647027 100644 (file)
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        for (i = 0; i < master->num_streamids; ++i) {
                u32 idx, s2cr;
                idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
-               s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+               s2cr = S2CR_TYPE_TRANS |
                       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
                writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
        }
@@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * allocation (PTRS_PER_PGD).
         */
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min(39UL, size);
+       smmu->s1_output_size = min((unsigned long)VA_BITS, size);
 #else
        smmu->s1_output_size = min(32UL, size);
 #endif
index 074018979cdfb047f96619bf050b0f26ba5a623f..99054d2c040d6e2c3b20f85e4e1e5c5c36682fa5 100644 (file)
@@ -29,7 +29,8 @@
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
-#include <mach/sysmmu.h>
+typedef u32 sysmmu_iova_t;
+typedef u32 sysmmu_pte_t;
 
 /* We do not consider super section mapping (16MB) */
 #define SECT_ORDER 20
 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
 
-#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
-#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
+                          ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
+#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
+#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
+                         ((*(sent) & 3) == 1))
 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
 
 #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
 
+static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
+{
+       return iova & (size - 1);
+}
+
 #define section_phys(sent) (*(sent) & SECT_MASK)
-#define section_offs(iova) ((iova) & 0xFFFFF)
+#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
 #define lpage_phys(pent) (*(pent) & LPAGE_MASK)
-#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
 #define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) ((iova) & 0xFFF)
-
-#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
-#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
 
 #define NUM_LV1ENTRIES 4096
-#define NUM_LV2ENTRIES 256
+#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
+
+static u32 lv1ent_offset(sysmmu_iova_t iova)
+{
+       return iova >> SECT_ORDER;
+}
+
+static u32 lv2ent_offset(sysmmu_iova_t iova)
+{
+       return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
+}
 
-#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
 
 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
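
A worked example of the two-level walk these macros describe, assuming SECT_ORDER = 20 and 4KB small pages (SPAGE_ORDER = 12, so NUM_LV2ENTRIES = 256):

/*
 * For iova = 0x12345678:
 *   lv1ent_offset(iova) = 0x12345678 >> 20          = 0x123  (section index)
 *   lv2ent_offset(iova) = (0x12345678 >> 12) & 0xFF = 0x45   (page index)
 *   spage_offs(iova)    = 0x12345678 & 0xFFF        = 0x678  (byte offset)
 */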
 
 #define CTRL_BLOCK     0x7
 #define CTRL_DISABLE   0x0
 
+#define CFG_LRU                0x1
+#define CFG_QOS(n)     ((n & 0xF) << 7)
+#define CFG_MASK       0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
+#define CFG_ACGEN      (1 << 24) /* System MMU 3.3 only */
+#define CFG_SYSSEL     (1 << 22) /* System MMU 3.2 only */
+#define CFG_FLPDCACHE  (1 << 20) /* System MMU 3.2+ only */
+
 #define REG_MMU_CTRL           0x000
 #define REG_MMU_CFG            0x004
 #define REG_MMU_STATUS         0x008
 
 #define REG_MMU_VERSION                0x034
 
+#define MMU_MAJ_VER(val)       ((val) >> 7)
+#define MMU_MIN_VER(val)       ((val) & 0x7F)
+#define MMU_RAW_VER(reg)       (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
+
+#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
+
 #define REG_PB0_SADDR          0x04C
 #define REG_PB0_EADDR          0x050
 #define REG_PB1_SADDR          0x054
 #define REG_PB1_EADDR          0x058
 
-static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+#define has_sysmmu(dev)                (dev->archdata.iommu != NULL)
+
+static struct kmem_cache *lv2table_kmem_cache;
+static sysmmu_pte_t *zero_lv2_table;
+#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
+
+static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
 {
        return pgtable + lv1ent_offset(iova);
 }
 
-static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
 {
-       return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+       return (sysmmu_pte_t *)phys_to_virt(
+                               lv2table_base(sent)) + lv2ent_offset(iova);
 }
 
 enum exynos_sysmmu_inttype {
@@ -124,16 +161,6 @@ enum exynos_sysmmu_inttype {
        SYSMMU_FAULTS_NUM
 };
 
-/*
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @itype
- *                is SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- *             translated. This is 0 if @itype is SYSMMU_BUSERROR.
- */
-typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
-                       unsigned long pgtable_base, unsigned long fault_addr);
-
 static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
@@ -157,27 +184,34 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "UNKNOWN FAULT"
 };
 
+/* attached to dev.archdata.iommu of the master device */
+struct exynos_iommu_owner {
+       struct list_head client; /* entry of exynos_iommu_domain.clients */
+       struct device *dev;
+       struct device *sysmmu;
+       struct iommu_domain *domain;
+       void *vmm_data;         /* IO virtual memory manager's data */
+       spinlock_t lock;        /* Lock to preserve consistency of System MMU */
+};
+
 struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
-       unsigned long *pgtable; /* lv1 page table, 16KB */
+       sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
 };
 
 struct sysmmu_drvdata {
-       struct list_head node; /* entry of exynos_iommu_domain.clients */
        struct device *sysmmu;  /* System MMU's device descriptor */
-       struct device *dev;     /* Owner of system MMU */
-       char *dbgname;
-       int nsfrs;
-       void __iomem **sfrbases;
-       struct clk *clk[2];
+       struct device *master;  /* Owner of system MMU */
+       void __iomem *sfrbase;
+       struct clk *clk;
+       struct clk *clk_master;
        int activations;
-       rwlock_t lock;
+       spinlock_t lock;
        struct iommu_domain *domain;
-       sysmmu_fault_handler_t fault_handler;
-       unsigned long pgtable;
+       phys_addr_t pgtable;
 };
 
 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
@@ -204,6 +238,11 @@ static void sysmmu_unblock(void __iomem *sfrbase)
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
 }
 
+static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
+{
+       return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
+}
+
 static bool sysmmu_block(void __iomem *sfrbase)
 {
        int i = 120;
@@ -226,434 +265,428 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
 }
 
 static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
-                                               unsigned long iova)
+                               sysmmu_iova_t iova, unsigned int num_inv)
 {
-       __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+       unsigned int i;
+
+       for (i = 0; i < num_inv; i++) {
+               __raw_writel((iova & SPAGE_MASK) | 1,
+                               sfrbase + REG_MMU_FLUSH_ENTRY);
+               iova += SPAGE_SIZE;
+       }
 }
 
 static void __sysmmu_set_ptbase(void __iomem *sfrbase,
-                                      unsigned long pgd)
+                                      phys_addr_t pgd)
 {
-       __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
 
        __sysmmu_tlb_invalidate(sfrbase);
 }
 
-static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
-                                               unsigned long size, int idx)
-{
-       __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
-       __raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
-}
-
-static void __set_fault_handler(struct sysmmu_drvdata *data,
-                                       sysmmu_fault_handler_t handler)
-{
-       unsigned long flags;
-
-       write_lock_irqsave(&data->lock, flags);
-       data->fault_handler = handler;
-       write_unlock_irqrestore(&data->lock, flags);
-}
-
-void exynos_sysmmu_set_fault_handler(struct device *dev,
-                                       sysmmu_fault_handler_t handler)
-{
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-
-       __set_fault_handler(data, handler);
-}
-
-static int default_fault_handler(enum exynos_sysmmu_inttype itype,
-                    unsigned long pgtable_base, unsigned long fault_addr)
+static void show_fault_information(const char *name,
+               enum exynos_sysmmu_inttype itype,
+               phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
 {
-       unsigned long *ent;
+       sysmmu_pte_t *ent;
 
        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;
 
-       pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
-                       sysmmu_fault_name[itype], fault_addr, pgtable_base);
+       pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
+               sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
 
-       ent = section_entry(__va(pgtable_base), fault_addr);
-       pr_err("\tLv1 entry: 0x%lx\n", *ent);
+       ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
+       pr_err("\tLv1 entry: %#x\n", *ent);
 
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
-               pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+               pr_err("\t Lv2 entry: %#x\n", *ent);
        }
-
-       pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
-
-       BUG();
-
-       return 0;
 }
 
 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 {
        /* SYSMMU is in blocked state when an interrupt occurs. */
        struct sysmmu_drvdata *data = dev_id;
-       struct resource *irqres;
-       struct platform_device *pdev;
        enum exynos_sysmmu_inttype itype;
-       unsigned long addr = -1;
-
-       int i, ret = -ENOSYS;
-
-       read_lock(&data->lock);
+       sysmmu_iova_t addr = -1;
+       int ret = -ENOSYS;
 
        WARN_ON(!is_sysmmu_active(data));
 
-       pdev = to_platform_device(data->sysmmu);
-       for (i = 0; i < (pdev->num_resources / 2); i++) {
-               irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-               if (irqres && ((int)irqres->start == irq))
-                       break;
-       }
+       spin_lock(&data->lock);
 
-       if (i == pdev->num_resources) {
+       if (!IS_ERR(data->clk_master))
+               clk_enable(data->clk_master);
+
+       itype = (enum exynos_sysmmu_inttype)
+               __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
+       if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                itype = SYSMMU_FAULT_UNKNOWN;
+       else
+               addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
+
+       if (itype == SYSMMU_FAULT_UNKNOWN) {
+               pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
+                       __func__, dev_name(data->sysmmu));
+               pr_err("%s: Please check if IRQ is correctly configured.\n",
+                       __func__);
+               BUG();
        } else {
-               itype = (enum exynos_sysmmu_inttype)
-                       __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
-               if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
-                       itype = SYSMMU_FAULT_UNKNOWN;
-               else
-                       addr = __raw_readl(
-                               data->sfrbases[i] + fault_reg_offset[itype]);
+               unsigned int base =
+                               __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
+               show_fault_information(dev_name(data->sysmmu),
+                                       itype, base, addr);
+               if (data->domain)
+                       ret = report_iommu_fault(data->domain,
+                                       data->master, addr, itype);
        }
 
-       if (data->domain)
-               ret = report_iommu_fault(data->domain, data->dev,
-                               addr, itype);
+       /* BUG if the fault handler did not recover the fault */
+       BUG_ON(ret != 0);
 
-       if ((ret == -ENOSYS) && data->fault_handler) {
-               unsigned long base = data->pgtable;
-               if (itype != SYSMMU_FAULT_UNKNOWN)
-                       base = __raw_readl(
-                                       data->sfrbases[i] + REG_PT_BASE_ADDR);
-               ret = data->fault_handler(itype, base, addr);
-       }
+       __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
 
-       if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
-               __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
-       else
-               dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
-                               data->dbgname, sysmmu_fault_name[itype]);
+       sysmmu_unblock(data->sfrbase);
 
-       if (itype != SYSMMU_FAULT_UNKNOWN)
-               sysmmu_unblock(data->sfrbases[i]);
+       if (!IS_ERR(data->clk_master))
+               clk_disable(data->clk_master);
 
-       read_unlock(&data->lock);
+       spin_unlock(&data->lock);
 
        return IRQ_HANDLED;
 }
 
-static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
 {
+       if (!IS_ERR(data->clk_master))
+               clk_enable(data->clk_master);
+
+       __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+       __raw_writel(0, data->sfrbase + REG_MMU_CFG);
+
+       clk_disable(data->clk);
+       if (!IS_ERR(data->clk_master))
+               clk_disable(data->clk_master);
+}
+
+static bool __sysmmu_disable(struct sysmmu_drvdata *data)
+{
+       bool disabled;
        unsigned long flags;
-       bool disabled = false;
-       int i;
 
-       write_lock_irqsave(&data->lock, flags);
+       spin_lock_irqsave(&data->lock, flags);
 
-       if (!set_sysmmu_inactive(data))
-               goto finish;
+       disabled = set_sysmmu_inactive(data);
 
-       for (i = 0; i < data->nsfrs; i++)
-               __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+       if (disabled) {
+               data->pgtable = 0;
+               data->domain = NULL;
 
-       if (data->clk[1])
-               clk_disable(data->clk[1]);
-       if (data->clk[0])
-               clk_disable(data->clk[0]);
+               __sysmmu_disable_nocount(data);
 
-       disabled = true;
-       data->pgtable = 0;
-       data->domain = NULL;
-finish:
-       write_unlock_irqrestore(&data->lock, flags);
+               dev_dbg(data->sysmmu, "Disabled\n");
+       } else {
+               dev_dbg(data->sysmmu, "%d times left to disable\n",
+                                       data->activations);
+       }
 
-       if (disabled)
-               dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
-       else
-               dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
-                                       data->dbgname, data->activations);
+       spin_unlock_irqrestore(&data->lock, flags);
 
        return disabled;
 }
 
-/* __exynos_sysmmu_enable: Enables System MMU
- *
- * returns -error if an error occurred and System MMU is not enabled,
- * 0 if the System MMU has been just enabled and 1 if System MMU was already
- * enabled before.
- */
-static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
-                       unsigned long pgtable, struct iommu_domain *domain)
+static void __sysmmu_init_config(struct sysmmu_drvdata *data)
 {
-       int i, ret = 0;
-       unsigned long flags;
+       unsigned int cfg = CFG_LRU | CFG_QOS(15);
+       unsigned int ver;
+
+       ver = __raw_sysmmu_version(data);
+       if (MMU_MAJ_VER(ver) == 3) {
+               if (MMU_MIN_VER(ver) >= 2) {
+                       cfg |= CFG_FLPDCACHE;
+                       if (MMU_MIN_VER(ver) == 3) {
+                               cfg |= CFG_ACGEN;
+                               cfg &= ~CFG_LRU;
+                       } else {
+                               cfg |= CFG_SYSSEL;
+                       }
+               }
+       }
 
-       write_lock_irqsave(&data->lock, flags);
+       __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
+}
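
A worked example of the version decoding that drives __sysmmu_init_config(): if REG_MMU_VERSION reads 0x30600000, the configuration ends up with FLPD caching and ACGEN enabled and LRU cleared:

/*
 * reg = 0x30600000
 * MMU_RAW_VER(reg)   = (reg >> 21) & 0x7FF = 0x183
 * MMU_MAJ_VER(0x183) = 0x183 >> 7          = 3
 * MMU_MIN_VER(0x183) = 0x183 & 0x7F        = 3   -> System MMU v3.3
 * cfg starts as CFG_LRU | CFG_QOS(15), then gains CFG_FLPDCACHE and
 * CFG_ACGEN and loses CFG_LRU.
 */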
 
-       if (!set_sysmmu_active(data)) {
-               if (WARN_ON(pgtable != data->pgtable)) {
-                       ret = -EBUSY;
-                       set_sysmmu_inactive(data);
-               } else {
-                       ret = 1;
-               }
+static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+{
+       if (!IS_ERR(data->clk_master))
+               clk_enable(data->clk_master);
+       clk_enable(data->clk);
 
-               dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
-               goto finish;
-       }
+       __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
 
-       if (data->clk[0])
-               clk_enable(data->clk[0]);
-       if (data->clk[1])
-               clk_enable(data->clk[1]);
+       __sysmmu_init_config(data);
 
-       data->pgtable = pgtable;
+       __sysmmu_set_ptbase(data->sfrbase, data->pgtable);
 
-       for (i = 0; i < data->nsfrs; i++) {
-               __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+       __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
 
-               if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
-                       /* System MMU version is 3.x */
-                       __raw_writel((1 << 12) | (2 << 28),
-                                       data->sfrbases[i] + REG_MMU_CFG);
-                       __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
-                       __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
-               }
+       if (!IS_ERR(data->clk_master))
+               clk_disable(data->clk_master);
+}
 
-               __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+static int __sysmmu_enable(struct sysmmu_drvdata *data,
+                       phys_addr_t pgtable, struct iommu_domain *domain)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&data->lock, flags);
+       if (set_sysmmu_active(data)) {
+               data->pgtable = pgtable;
+               data->domain = domain;
+
+               __sysmmu_enable_nocount(data);
+
+               dev_dbg(data->sysmmu, "Enabled\n");
+       } else {
+               ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
+
+               dev_dbg(data->sysmmu, "already enabled\n");
        }
 
-       data->domain = domain;
+       if (WARN_ON(ret < 0))
+               set_sysmmu_inactive(data); /* decrement count */
 
-       dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
-finish:
-       write_unlock_irqrestore(&data->lock, flags);
+       spin_unlock_irqrestore(&data->lock, flags);
 
        return ret;
 }
 
-int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and System MMU is not enabled,
+ * 0 if the System MMU has been just enabled and 1 if System MMU was already
+ * enabled before.
+ */
+static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
+                                 struct iommu_domain *domain)
 {
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-       int ret;
+       int ret = 0;
+       unsigned long flags;
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct sysmmu_drvdata *data;
 
-       BUG_ON(!memblock_is_memory(pgtable));
+       BUG_ON(!has_sysmmu(dev));
 
-       ret = pm_runtime_get_sync(data->sysmmu);
-       if (ret < 0) {
-               dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
-               return ret;
-       }
+       spin_lock_irqsave(&owner->lock, flags);
 
-       ret = __exynos_sysmmu_enable(data, pgtable, NULL);
-       if (WARN_ON(ret < 0)) {
-               pm_runtime_put(data->sysmmu);
-               dev_err(data->sysmmu,
-                       "(%s) Already enabled with page table %#lx\n",
-                       data->dbgname, data->pgtable);
-       } else {
-               data->dev = dev;
-       }
+       data = dev_get_drvdata(owner->sysmmu);
+
+       ret = __sysmmu_enable(data, pgtable, domain);
+       if (ret >= 0)
+               data->master = dev;
+
+       spin_unlock_irqrestore(&owner->lock, flags);
 
        return ret;
 }
 
+int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
+{
+       BUG_ON(!memblock_is_memory(pgtable));
+
+       return __exynos_sysmmu_enable(dev, pgtable, NULL);
+}
+
 static bool exynos_sysmmu_disable(struct device *dev)
 {
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-       bool disabled;
+       unsigned long flags;
+       bool disabled = true;
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct sysmmu_drvdata *data;
+
+       BUG_ON(!has_sysmmu(dev));
+
+       spin_lock_irqsave(&owner->lock, flags);
 
-       disabled = __exynos_sysmmu_disable(data);
-       pm_runtime_put(data->sysmmu);
+       data = dev_get_drvdata(owner->sysmmu);
+
+       disabled = __sysmmu_disable(data);
+       if (disabled)
+               data->master = NULL;
+
+       spin_unlock_irqrestore(&owner->lock, flags);
 
        return disabled;
 }
 
-static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
+                                             sysmmu_iova_t iova)
+{
+       if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
+               __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
+                                           sysmmu_iova_t iova)
+{
+       unsigned long flags;
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);
+
+       if (!IS_ERR(data->clk_master))
+               clk_enable(data->clk_master);
+
+       spin_lock_irqsave(&data->lock, flags);
+       if (is_sysmmu_active(data))
+               __sysmmu_tlb_invalidate_flpdcache(data, iova);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       if (!IS_ERR(data->clk_master))
+               clk_disable(data->clk_master);
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
+                                       size_t size)
 {
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        unsigned long flags;
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct sysmmu_drvdata *data;
 
-       read_lock_irqsave(&data->lock, flags);
+       data = dev_get_drvdata(owner->sysmmu);
 
+       spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
-               int i;
-               for (i = 0; i < data->nsfrs; i++) {
-                       if (sysmmu_block(data->sfrbases[i])) {
-                               __sysmmu_tlb_invalidate_entry(
-                                               data->sfrbases[i], iova);
-                               sysmmu_unblock(data->sfrbases[i]);
-                       }
+               unsigned int num_inv = 1;
+
+               if (!IS_ERR(data->clk_master))
+                       clk_enable(data->clk_master);
+
+               /*
+                * Required L2TLB invalidations:
+                *    4KB page:  1 invalidation
+                *   64KB page: 16 invalidations
+                *    1MB page: 64 invalidations
+                * because the TLB is 8-way set-associative with 64 sets.
+                * A 1MB page can be cached in any of the 64 sets;
+                * a 64KB page only in one of 16 consecutive sets.
+                */
+               if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
+                       num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+
+               if (sysmmu_block(data->sfrbase)) {
+                       __sysmmu_tlb_invalidate_entry(
+                               data->sfrbase, iova, num_inv);
+                       sysmmu_unblock(data->sfrbase);
                }
+               if (!IS_ERR(data->clk_master))
+                       clk_disable(data->clk_master);
        } else {
-               dev_dbg(data->sysmmu,
-                       "(%s) Disabled. Skipping invalidating TLB.\n",
-                       data->dbgname);
+               dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
+                       iova);
        }
-
-       read_unlock_irqrestore(&data->lock, flags);
+       spin_unlock_irqrestore(&data->lock, flags);
 }
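
A worked example of the num_inv computation above for System MMU v2, assuming PAGE_SIZE = 4KB:

/*
 *    4KB page: size / PAGE_SIZE = 1   ->  1 invalidation
 *   64KB page: size / PAGE_SIZE = 16  -> 16 invalidations
 *    1MB page: size / PAGE_SIZE = 256 -> capped at 64 (one per set)
 */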
 
 void exynos_sysmmu_tlb_invalidate(struct device *dev)
 {
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        unsigned long flags;
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct sysmmu_drvdata *data;
 
-       read_lock_irqsave(&data->lock, flags);
+       data = dev_get_drvdata(owner->sysmmu);
 
+       spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
-               int i;
-               for (i = 0; i < data->nsfrs; i++) {
-                       if (sysmmu_block(data->sfrbases[i])) {
-                               __sysmmu_tlb_invalidate(data->sfrbases[i]);
-                               sysmmu_unblock(data->sfrbases[i]);
-                       }
+               if (!IS_ERR(data->clk_master))
+                       clk_enable(data->clk_master);
+               if (sysmmu_block(data->sfrbase)) {
+                       __sysmmu_tlb_invalidate(data->sfrbase);
+                       sysmmu_unblock(data->sfrbase);
                }
+               if (!IS_ERR(data->clk_master))
+                       clk_disable(data->clk_master);
        } else {
-               dev_dbg(data->sysmmu,
-                       "(%s) Disabled. Skipping invalidating TLB.\n",
-                       data->dbgname);
+               dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
        }
-
-       read_unlock_irqrestore(&data->lock, flags);
+       spin_unlock_irqrestore(&data->lock, flags);
 }
 
-static int exynos_sysmmu_probe(struct platform_device *pdev)
+static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 {
-       int i, ret;
-       struct device *dev;
+       int irq, ret;
+       struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
+       struct resource *res;
 
-       dev = &pdev->dev;
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data) {
-               dev_dbg(dev, "Not enough memory\n");
-               ret = -ENOMEM;
-               goto err_alloc;
-       }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       data->sfrbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(data->sfrbase))
+               return PTR_ERR(data->sfrbase);
 
-       ret = dev_set_drvdata(dev, data);
-       if (ret) {
-               dev_dbg(dev, "Unabled to initialize driver data\n");
-               goto err_init;
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "Unable to find IRQ resource\n");
+               return irq;
        }
 
-       data->nsfrs = pdev->num_resources / 2;
-       data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
-                                                               GFP_KERNEL);
-       if (data->sfrbases == NULL) {
-               dev_dbg(dev, "Not enough memory\n");
-               ret = -ENOMEM;
-               goto err_init;
-       }
-
-       for (i = 0; i < data->nsfrs; i++) {
-               struct resource *res;
-               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               if (!res) {
-                       dev_dbg(dev, "Unable to find IOMEM region\n");
-                       ret = -ENOENT;
-                       goto err_res;
-               }
-
-               data->sfrbases[i] = ioremap(res->start, resource_size(res));
-               if (!data->sfrbases[i]) {
-                       dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
-                                                       res->start);
-                       ret = -ENOENT;
-                       goto err_res;
-               }
+       ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
+                               dev_name(dev), data);
+       if (ret) {
+               dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+               return ret;
        }
 
-       for (i = 0; i < data->nsfrs; i++) {
-               ret = platform_get_irq(pdev, i);
-               if (ret <= 0) {
-                       dev_dbg(dev, "Unable to find IRQ resource\n");
-                       goto err_irq;
-               }
-
-               ret = request_irq(ret, exynos_sysmmu_irq, 0,
-                                       dev_name(dev), data);
+       data->clk = devm_clk_get(dev, "sysmmu");
+       if (IS_ERR(data->clk)) {
+               dev_err(dev, "Failed to get clock!\n");
+               return PTR_ERR(data->clk);
+       } else {
+               ret = clk_prepare(data->clk);
                if (ret) {
-                       dev_dbg(dev, "Unabled to register interrupt handler\n");
-                       goto err_irq;
+                       dev_err(dev, "Failed to prepare clk\n");
+                       return ret;
                }
        }
 
-       if (dev_get_platdata(dev)) {
-               char *deli, *beg;
-               struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
-
-               beg = platdata->clockname;
-
-               for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
-                       /* NOTHING */;
-
-               if (*deli == '\0')
-                       deli = NULL;
-               else
-                       *deli = '\0';
-
-               data->clk[0] = clk_get(dev, beg);
-               if (IS_ERR(data->clk[0])) {
-                       data->clk[0] = NULL;
-                       dev_dbg(dev, "No clock descriptor registered\n");
-               }
-
-               if (data->clk[0] && deli) {
-                       *deli = ',';
-                       data->clk[1] = clk_get(dev, deli + 1);
-                       if (IS_ERR(data->clk[1]))
-                               data->clk[1] = NULL;
+       data->clk_master = devm_clk_get(dev, "master");
+       if (!IS_ERR(data->clk_master)) {
+               ret = clk_prepare(data->clk_master);
+               if (ret) {
+                       clk_unprepare(data->clk);
+                       dev_err(dev, "Failed to prepare master's clk\n");
+                       return ret;
                }
-
-               data->dbgname = platdata->dbgname;
        }
 
        data->sysmmu = dev;
-       rwlock_init(&data->lock);
-       INIT_LIST_HEAD(&data->node);
+       spin_lock_init(&data->lock);
 
-       __set_fault_handler(data, &default_fault_handler);
+       platform_set_drvdata(pdev, data);
 
-       if (dev->parent)
-               pm_runtime_enable(dev);
+       pm_runtime_enable(dev);
 
-       dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
        return 0;
-err_irq:
-       while (i-- > 0) {
-               int irq;
-
-               irq = platform_get_irq(pdev, i);
-               free_irq(irq, data);
-       }
-err_res:
-       while (data->nsfrs-- > 0)
-               iounmap(data->sfrbases[data->nsfrs]);
-       kfree(data->sfrbases);
-err_init:
-       kfree(data);
-err_alloc:
-       dev_err(dev, "Failed to initialize\n");
-       return ret;
 }
 
-static struct platform_driver exynos_sysmmu_driver = {
-       .probe          = exynos_sysmmu_probe,
-       .driver         = {
+static const struct of_device_id sysmmu_of_match[] __initconst = {
+       { .compatible   = "samsung,exynos-sysmmu", },
+       { },
+};
+
+static struct platform_driver exynos_sysmmu_driver __refdata = {
+       .probe  = exynos_sysmmu_probe,
+       .driver = {
                .owner          = THIS_MODULE,
                .name           = "exynos-sysmmu",
+               .of_match_table = sysmmu_of_match,
        }
 };
 
@@ -667,21 +700,32 @@ static inline void pgtable_flush(void *vastart, void *vaend)
 static int exynos_iommu_domain_init(struct iommu_domain *domain)
 {
        struct exynos_iommu_domain *priv;
+       int i;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       priv->pgtable = (unsigned long *)__get_free_pages(
-                                               GFP_KERNEL | __GFP_ZERO, 2);
+       priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!priv->pgtable)
                goto err_pgtable;
 
-       priv->lv2entcnt = (short *)__get_free_pages(
-                                               GFP_KERNEL | __GFP_ZERO, 1);
+       priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!priv->lv2entcnt)
                goto err_counter;
 
+       /* Workaround for System MMU v3.3 to prevent caching of fault 1MiB mappings */
+       for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
+               priv->pgtable[i + 0] = ZERO_LV2LINK;
+               priv->pgtable[i + 1] = ZERO_LV2LINK;
+               priv->pgtable[i + 2] = ZERO_LV2LINK;
+               priv->pgtable[i + 3] = ZERO_LV2LINK;
+               priv->pgtable[i + 4] = ZERO_LV2LINK;
+               priv->pgtable[i + 5] = ZERO_LV2LINK;
+               priv->pgtable[i + 6] = ZERO_LV2LINK;
+               priv->pgtable[i + 7] = ZERO_LV2LINK;
+       }
+
        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
 
        spin_lock_init(&priv->lock);
@@ -705,7 +749,7 @@ err_pgtable:
 static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
 {
        struct exynos_iommu_domain *priv = domain->priv;
-       struct sysmmu_drvdata *data;
+       struct exynos_iommu_owner *owner;
        unsigned long flags;
        int i;
 
@@ -713,16 +757,20 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       list_for_each_entry(data, &priv->clients, node) {
-               while (!exynos_sysmmu_disable(data->dev))
+       list_for_each_entry(owner, &priv->clients, client) {
+               while (!exynos_sysmmu_disable(owner->dev))
                        ; /* until System MMU is actually disabled */
        }
 
+       while (!list_empty(&priv->clients))
+               list_del_init(priv->clients.next);
+
        spin_unlock_irqrestore(&priv->lock, flags);
 
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
-                       kfree(__va(lv2table_base(priv->pgtable + i)));
+                       kmem_cache_free(lv2table_kmem_cache,
+                               phys_to_virt(lv2table_base(priv->pgtable + i)));
 
        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
@@ -733,114 +781,134 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
 static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
 {
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *priv = domain->priv;
+       phys_addr_t pagetable = virt_to_phys(priv->pgtable);
        unsigned long flags;
        int ret;
 
-       ret = pm_runtime_get_sync(data->sysmmu);
-       if (ret < 0)
-               return ret;
-
-       ret = 0;
-
        spin_lock_irqsave(&priv->lock, flags);
 
-       ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
-
+       ret = __exynos_sysmmu_enable(dev, pagetable, domain);
        if (ret == 0) {
-               /* 'data->node' must not be appeared in priv->clients */
-               BUG_ON(!list_empty(&data->node));
-               data->dev = dev;
-               list_add_tail(&data->node, &priv->clients);
+               list_add_tail(&owner->client, &priv->clients);
+               owner->domain = domain;
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
        if (ret < 0) {
-               dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
-                               __func__, __pa(priv->pgtable));
-               pm_runtime_put(data->sysmmu);
-       } else if (ret > 0) {
-               dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
-                                       __func__, __pa(priv->pgtable));
-       } else {
-               dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
-                                       __func__, __pa(priv->pgtable));
+               dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
+                                       __func__, &pagetable);
+               return ret;
        }
 
+       dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
+               __func__, &pagetable, (ret == 0) ? "" : ", again");
+
        return ret;
 }
 
 static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
 {
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct exynos_iommu_owner *owner;
        struct exynos_iommu_domain *priv = domain->priv;
-       struct list_head *pos;
+       phys_addr_t pagetable = virt_to_phys(priv->pgtable);
        unsigned long flags;
-       bool found = false;
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       list_for_each(pos, &priv->clients) {
-               if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
-                       found = true;
+       list_for_each_entry(owner, &priv->clients, client) {
+               if (owner == dev->archdata.iommu) {
+                       if (exynos_sysmmu_disable(dev)) {
+                               list_del_init(&owner->client);
+                               owner->domain = NULL;
+                       }
                        break;
                }
        }
 
-       if (!found)
-               goto finish;
-
-       if (__exynos_sysmmu_disable(data)) {
-               dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
-                                       __func__, __pa(priv->pgtable));
-               list_del_init(&data->node);
-
-       } else {
-               dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
-                                       __func__, __pa(priv->pgtable));
-       }
-
-finish:
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       if (found)
-               pm_runtime_put(data->sysmmu);
+       if (owner == dev->archdata.iommu)
+               dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+                                       __func__, &pagetable);
+       else
+               dev_err(dev, "%s: No IOMMU is attached\n", __func__);
 }
 
-static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
-                                       short *pgcounter)
+static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
+               sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
 {
+       if (lv1ent_section(sent)) {
+               WARN(1, "Trying to map %#08x, already mapped with a 1MiB page", iova);
+               return ERR_PTR(-EADDRINUSE);
+       }
+
        if (lv1ent_fault(sent)) {
-               unsigned long *pent;
+               sysmmu_pte_t *pent;
+               bool need_flush_flpd_cache = lv1ent_zero(sent);
 
-               pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
-               BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+               pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
+               BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
-               *sent = mk_lv1ent_page(__pa(pent));
+               *sent = mk_lv1ent_page(virt_to_phys(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
+
+               /*
+                * If the prefetched SLPD is the fault SLPD of zero_l2_table,
+                * the FLPD cache may hold the address of zero_l2_table. This
+                * function replaces zero_l2_table with a new L2 page table so
+                * that valid mappings can be written.
+                * Accessing the valid area may still cause a page fault, since
+                * the FLPD cache may keep caching zero_l2_table for that area
+                * instead of the new L2 page table that holds its mapping
+                * information.
+                * Thus any replacement of zero_l2_table with another valid L2
+                * page table must involve an FLPD cache invalidation on System
+                * MMU v3.3.
+                * The FLPD cache invalidation is performed together with a TLB
+                * invalidation by VPN, without blocking. It is safe to
+                * invalidate the TLB without blocking because the target
+                * address of the TLB invalidation is not currently mapped.
+                */
+               if (need_flush_flpd_cache) {
+                       struct exynos_iommu_owner *owner;
+
+                       spin_lock(&priv->lock);
+                       list_for_each_entry(owner, &priv->clients, client)
+                               sysmmu_tlb_invalidate_flpdcache(
+                                                       owner->dev, iova);
+                       spin_unlock(&priv->lock);
+               }
        }
 
        return page_entry(sent, iova);
 }
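
For readers unfamiliar with the convention adopted above: ERR_PTR() encodes an
errno value directly in the returned pointer, so callers of alloc_lv2entry()
now distinguish failure reasons with IS_ERR()/PTR_ERR() instead of testing for
NULL. A minimal userspace model of the idiom (the real macros live in
<linux/err.h>; alloc_or_err() is a made-up stand-in):

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static int backing;

	static void *alloc_or_err(int fail)
	{
		if (fail)
			return ERR_PTR(-ENOMEM); /* errno encoded in pointer */
		return &backing;
	}

	int main(void)
	{
		void *p = alloc_or_err(1);

		if (IS_ERR(p))
			printf("allocation failed: %ld\n", PTR_ERR(p));
		return 0;
	}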
 
-static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+static int lv1set_section(struct exynos_iommu_domain *priv,
+                         sysmmu_pte_t *sent, sysmmu_iova_t iova,
+                         phys_addr_t paddr, short *pgcnt)
 {
-       if (lv1ent_section(sent))
+       if (lv1ent_section(sent)) {
+               WARN(1, "Trying to map a 1MiB page at %#08x, which is already mapped",
+                       iova);
                return -EADDRINUSE;
+       }
 
        if (lv1ent_page(sent)) {
-               if (*pgcnt != NUM_LV2ENTRIES)
+               if (*pgcnt != NUM_LV2ENTRIES) {
+                       WARN(1, "Trying to map a 1MiB page at %#08x, which is already mapped",
+                               iova);
                        return -EADDRINUSE;
+               }
 
-               kfree(page_entry(sent, 0));
-
+               kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }
 
@@ -848,14 +916,26 @@ static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
 
        pgtable_flush(sent, sent + 1);
 
+       spin_lock(&priv->lock);
+       if (lv1ent_page_zero(sent)) {
+               struct exynos_iommu_owner *owner;
+               /*
+                * Flush the FLPD cache on System MMU v3.3, which may have
+                * cached an FLPD entry via speculative prefetch of an SLPD
+                * that has no mapping.
+                */
+               list_for_each_entry(owner, &priv->clients, client)
+                       sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
+       }
+       spin_unlock(&priv->lock);
+
        return 0;
 }
 
-static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
 {
        if (size == SPAGE_SIZE) {
-               if (!lv2ent_fault(pent))
+               if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;
 
                *pent = mk_lv2ent_spage(paddr);
@@ -863,9 +943,11 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
+
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
-                       if (!lv2ent_fault(pent)) {
-                               memset(pent, 0, sizeof(*pent) * i);
+                       if (WARN_ON(!lv2ent_fault(pent))) {
+                               if (i > 0)
+                                       memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }
 
@@ -878,11 +960,38 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
        return 0;
 }
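
The memset(pent - i, ...) change above fixes the rollback path: on a collision
at index i, the old code cleared memory starting at the already-advanced pent,
wiping entries it had never written while leaving the partial mapping in
place. A standalone model of the corrected rollback (set_lpage() and the
0xdeadbeef marker are illustrative, not driver code):

	#include <stdio.h>
	#include <string.h>

	#define SPAGES_PER_LPAGE 16

	static int set_lpage(unsigned int *pent)
	{
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (*pent != 0) {	/* entry already in use */
				if (i > 0)	/* undo entries 0 .. i-1 */
					memset(pent - i, 0, sizeof(*pent) * i);
				return -1;	/* -EADDRINUSE in the driver */
			}
			*pent = 0xdeadbeef;	/* pretend mapping */
		}
		return 0;
	}

	int main(void)
	{
		unsigned int tbl[SPAGES_PER_LPAGE] = { [8] = 1 };

		/* fails at index 8; entries 0..7 are rolled back to 0 */
		printf("ret=%d tbl[0]=%#x\n", set_lpage(tbl), tbl[0]);
		return 0;
	}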
 
-static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+/*
+ * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
+ *
+ * System MMU v3.x has advanced logic that improves address translation
+ * performance by caching more page table entries during a page table walk.
+ * However, the logic has a bug: fault page table entries are cached as well,
+ * and System MMU reports a page fault whenever such a cached fault entry is
+ * hit, even after the entry has been updated to a valid one.
+ * To prevent caching of fault page table entries that may later become valid,
+ * the virtual memory manager must apply the following workarounds.
+ *
+ * Any two consecutive I/O virtual address regions must be separated by a hole
+ * of at least 128KiB to prevent misbehavior of System MMU 3.x (w/a of h/w
+ * bug).
+ *
+ * More precisely, the start address of any I/O virtual region must be aligned
+ * to the following sizes for System MMU v3.1 and v3.2:
+ * System MMU v3.1: 128KiB
+ * System MMU v3.2: 256KiB
+ *
+ * Because System MMU v3.3 caches page table entries even more aggressively,
+ * it needs further workarounds:
+ * - Any two consecutive I/O virtual regions must be separated by a hole of
+ *   at least 128KiB.
+ * - The start address of an I/O virtual region must be aligned to 128KiB.
+ */
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
                         phys_addr_t paddr, size_t size, int prot)
 {
        struct exynos_iommu_domain *priv = domain->priv;
-       unsigned long *entry;
+       sysmmu_pte_t *entry;
+       sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;
 
@@ -893,38 +1002,52 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
        entry = section_entry(priv->pgtable, iova);
 
        if (size == SECT_SIZE) {
-               ret = lv1set_section(entry, paddr,
+               ret = lv1set_section(priv, entry, iova, paddr,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
-               unsigned long *pent;
+               sysmmu_pte_t *pent;
 
-               pent = alloc_lv2entry(entry, iova,
+               pent = alloc_lv2entry(priv, entry, iova,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
 
-               if (!pent)
-                       ret = -ENOMEM;
+               if (IS_ERR(pent))
+                       ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        }
 
-       if (ret) {
-               pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
-                                                       __func__, iova, size);
-       }
+       if (ret)
+               pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
+                       __func__, ret, size, iova);
 
        spin_unlock_irqrestore(&priv->pgtablelock, flags);
 
        return ret;
 }
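
A minimal sketch of what the *CAUTION* comment asks of an I/O virtual memory
manager, applying the strictest (v3.3) rules: region starts aligned to 128KiB
and a hole of at least 128KiB between consecutive regions. iova_alloc() and
the cursor variable are hypothetical, not part of this driver:

	#include <stdio.h>

	#define SZ_128K		0x20000UL
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static unsigned long next_free = 0x10000000UL; /* hypothetical cursor */

	static unsigned long iova_alloc(unsigned long size)
	{
		unsigned long start = ALIGN_UP(next_free, SZ_128K);

		next_free = start + size + SZ_128K; /* reserve mandatory hole */
		return start;
	}

	int main(void)
	{
		printf("region A at %#lx\n", iova_alloc(0x3000));
		printf("region B at %#lx\n", iova_alloc(0x5000));
		return 0;
	}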
 
+static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
+                                               sysmmu_iova_t iova, size_t size)
+{
+       struct exynos_iommu_owner *owner;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       list_for_each_entry(owner, &priv->clients, client)
+               sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+
 static size_t exynos_iommu_unmap(struct iommu_domain *domain,
-                                              unsigned long iova, size_t size)
+                                       unsigned long l_iova, size_t size)
 {
        struct exynos_iommu_domain *priv = domain->priv;
-       struct sysmmu_drvdata *data;
+       sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
+       sysmmu_pte_t *ent;
+       size_t err_pgsize;
        unsigned long flags;
-       unsigned long *ent;
 
        BUG_ON(priv->pgtable == NULL);
 
@@ -933,9 +1056,12 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
        ent = section_entry(priv->pgtable, iova);
 
        if (lv1ent_section(ent)) {
-               BUG_ON(size < SECT_SIZE);
+               if (WARN_ON(size < SECT_SIZE)) {
+                       err_pgsize = SECT_SIZE;
+                       goto err;
+               }
 
-               *ent = 0;
+               *ent = ZERO_LV2LINK; /* w/a for h/w bug in System MMU v3.3 */
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
@@ -959,34 +1085,42 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
        if (lv2ent_small(ent)) {
                *ent = 0;
                size = SPAGE_SIZE;
+               pgtable_flush(ent, ent + 1);
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }
 
        /* lv1ent_large(ent) == true here */
-       BUG_ON(size < LPAGE_SIZE);
+       if (WARN_ON(size < LPAGE_SIZE)) {
+               err_pgsize = LPAGE_SIZE;
+               goto err;
+       }
 
        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+       pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
 
        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
 done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);
 
-       spin_lock_irqsave(&priv->lock, flags);
-       list_for_each_entry(data, &priv->clients, node)
-               sysmmu_tlb_invalidate_entry(data->dev, iova);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
+       exynos_iommu_tlb_invalidate_entry(priv, iova, size);
 
        return size;
+err:
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
+               __func__, size, iova, err_pgsize);
+
+       return 0;
 }
 
 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
 {
        struct exynos_iommu_domain *priv = domain->priv;
-       unsigned long *entry;
+       sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;
 
@@ -1010,14 +1144,42 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
        return phys;
 }
 
+static int exynos_iommu_add_device(struct device *dev)
+{
+       struct iommu_group *group;
+       int ret;
+
+       group = iommu_group_get(dev);
+
+       if (!group) {
+               group = iommu_group_alloc();
+               if (IS_ERR(group)) {
+                       dev_err(dev, "Failed to allocate IOMMU group\n");
+                       return PTR_ERR(group);
+               }
+       }
+
+       ret = iommu_group_add_device(group, dev);
+       iommu_group_put(group);
+
+       return ret;
+}
+
+static void exynos_iommu_remove_device(struct device *dev)
+{
+       iommu_group_remove_device(dev);
+}
+
 static struct iommu_ops exynos_iommu_ops = {
-       .domain_init = &exynos_iommu_domain_init,
-       .domain_destroy = &exynos_iommu_domain_destroy,
-       .attach_dev = &exynos_iommu_attach_device,
-       .detach_dev = &exynos_iommu_detach_device,
-       .map = &exynos_iommu_map,
-       .unmap = &exynos_iommu_unmap,
-       .iova_to_phys = &exynos_iommu_iova_to_phys,
+       .domain_init = exynos_iommu_domain_init,
+       .domain_destroy = exynos_iommu_domain_destroy,
+       .attach_dev = exynos_iommu_attach_device,
+       .detach_dev = exynos_iommu_detach_device,
+       .map = exynos_iommu_map,
+       .unmap = exynos_iommu_unmap,
+       .iova_to_phys = exynos_iommu_iova_to_phys,
+       .add_device = exynos_iommu_add_device,
+       .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
 };
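
For context, this is roughly how a client driver reaches the callbacks wired
up above through the generic IOMMU API of this kernel generation.
example_attach() is a hypothetical caller and the IOVA constant is arbitrary:

	#include <linux/iommu.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>

	static int example_attach(struct device *dev, phys_addr_t pa)
	{
		struct iommu_domain *domain;
		int ret;

		domain = iommu_domain_alloc(&platform_bus_type); /* ->domain_init */
		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, dev);		/* ->attach_dev */
		if (ret)
			goto out_free;

		/* ->map: one SECT_SIZE (1MiB) mapping at an arbitrary IOVA */
		ret = iommu_map(domain, 0x20000000, pa, SZ_1M,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			iommu_detach_device(domain, dev);	/* ->detach_dev */
	out_free:
		if (ret)
			iommu_domain_free(domain);	/* ->domain_destroy */
		return ret;
	}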
 
@@ -1025,11 +1187,41 @@ static int __init exynos_iommu_init(void)
 {
        int ret;
 
+       lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
+                               LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
+       if (!lv2table_kmem_cache) {
+               pr_err("%s: Failed to create kmem cache\n", __func__);
+               return -ENOMEM;
+       }
+
        ret = platform_driver_register(&exynos_sysmmu_driver);
+       if (ret) {
+               pr_err("%s: Failed to register driver\n", __func__);
+               goto err_reg_driver;
+       }
 
-       if (ret == 0)
-               bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+       zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
+       if (zero_lv2_table == NULL) {
+               pr_err("%s: Failed to allocate zero level2 page table\n",
+                       __func__);
+               ret = -ENOMEM;
+               goto err_zero_lv2;
+       }
 
+       ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+       if (ret) {
+               pr_err("%s: Failed to register exynos-iommu driver.\n",
+                                                               __func__);
+               goto err_set_iommu;
+       }
+
+       return 0;
+err_set_iommu:
+       kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
+err_zero_lv2:
+       platform_driver_unregister(&exynos_sysmmu_driver);
+err_reg_driver:
+       kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
 }
 subsys_initcall(exynos_iommu_init);
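
The init path above uses the standard goto-unwind pattern: each failure label
releases exactly what was acquired before the failing step, in reverse order.
A compilable userspace model of that shape (init_demo() and fail_at are
invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	static int init_demo(int fail_at)
	{
		void *cache, *zero_tbl;
		int ret = -1;

		cache = malloc(64);		/* kmem_cache_create() */
		if (!cache)
			return -1;

		if (fail_at == 1)		/* platform_driver_register() */
			goto err_reg_driver;

		zero_tbl = calloc(1, 64);	/* kmem_cache_zalloc() */
		if (!zero_tbl)
			goto err_zero_lv2;

		if (fail_at == 2)		/* bus_set_iommu() */
			goto err_set_iommu;

		return 0;
	err_set_iommu:
		free(zero_tbl);
	err_zero_lv2:
		/* platform_driver_unregister() would go here */
	err_reg_driver:
		free(cache);
		return ret;
	}

	int main(void)
	{
		printf("fail_at=2 -> %d\n", init_demo(2));
		return 0;
	}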
index cba0498eb0116b8d4366a0e3cb1abf8dbfc23d6b..b99dd88e31b9b20b41bccde26fd54789a4d61250 100644 (file)
@@ -592,8 +592,7 @@ found_cpu_node:
                /* advance to next node in cache hierarchy */
                node = of_find_node_by_phandle(*prop);
                if (!node) {
-                       pr_debug("Invalid node for cache hierarchy %s\n",
-                               node->full_name);
+                       pr_debug("Invalid node for cache hierarchy\n");
                        return ~(u32)0;
                }
        }
index 08ba4972da9d2543a8ec9ad9f312451647bcd1e9..61def7cb52633c04b4ea764f4948de69d12656f4 100644 (file)
@@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb)
 
 static int msm_iommu_probe(struct platform_device *pdev)
 {
-       struct resource *r, *r2;
+       struct resource *r;
        struct clk *iommu_clk;
        struct clk *iommu_pclk;
        struct msm_iommu_drvdata *drvdata;
        struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
        void __iomem *regs_base;
-       resource_size_t len;
        int ret, irq, par;
 
        if (pdev->id == -1) {
@@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev)
                iommu_clk = NULL;
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
-       if (!r) {
-               ret = -ENODEV;
-               goto fail_clk;
-       }
-
-       len = resource_size(r);
-
-       r2 = request_mem_region(r->start, len, r->name);
-       if (!r2) {
-               pr_err("Could not request memory region: start=%p, len=%d\n",
-                                                       (void *) r->start, len);
-               ret = -EBUSY;
+       regs_base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(regs_base)) {
+               ret = PTR_ERR(regs_base);
                goto fail_clk;
        }
 
-       regs_base = ioremap(r2->start, len);
-
-       if (!regs_base) {
-               pr_err("Could not ioremap: start=%p, len=%d\n",
-                        (void *) r2->start, len);
-               ret = -EBUSY;
-               goto fail_mem;
-       }
-
        irq = platform_get_irq_byname(pdev, "secure_irq");
        if (irq < 0) {
                ret = -ENODEV;
-               goto fail_io;
+               goto fail_clk;
        }
 
        msm_iommu_reset(regs_base, iommu_dev->ncb);
@@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
        if (!par) {
                pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
                ret = -ENODEV;
-               goto fail_io;
+               goto fail_clk;
        }
 
        ret = request_irq(irq, msm_iommu_fault_handler, 0,
                        "msm_iommu_secure_irpt_handler", drvdata);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
-               goto fail_io;
+               goto fail_clk;
        }
 
 
@@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
        clk_disable(iommu_pclk);
 
        return 0;
-fail_io:
-       iounmap(regs_base);
-fail_mem:
-       release_mem_region(r->start, len);
 fail_clk:
        if (iommu_clk) {
                clk_disable(iommu_clk);
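
The conversion above is the canonical devm_ioremap_resource() cleanup: one
managed call replaces the request_mem_region()/ioremap() pair and both of the
failure labels, since the mapping and the region are released automatically on
probe failure or device removal. A sketch of the resulting probe shape
(demo_probe() is hypothetical):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, r); /* checks r == NULL too */
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* use base; no iounmap()/release_mem_region() on any path */
		return 0;
	}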
index 7fcbfc498fa93c2527968191e6658be99eaa7b2d..895af06a667fefc404967b1a83375727fb09d41f 100644 (file)
@@ -34,6 +34,9 @@
 #include "omap-iopgtable.h"
 #include "omap-iommu.h"
 
+#define to_iommu(dev)                                                  \
+       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+
 #define for_each_iotlb_cr(obj, n, __i, cr)                             \
        for (__i = 0;                                                   \
             (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
@@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
                                __func__, start, da, bytes);
                        iotlb_load_cr(obj, &cr);
                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+                       break;
                }
        }
        pm_runtime_put_sync(obj->dev);
@@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte)
        clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
 }
 
-static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
-                                  u32 flags)
+static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 {
        memset(e, 0, sizeof(*e));
 
        e->da           = da;
        e->pa           = pa;
-       e->valid        = 1;
+       e->valid        = MMU_CAM_V;
        /* FIXME: add OMAP1 support */
-       e->pgsz         = flags & MMU_CAM_PGSZ_MASK;
-       e->endian       = flags & MMU_RAM_ENDIAN_MASK;
-       e->elsz         = flags & MMU_RAM_ELSZ_MASK;
-       e->mixed        = flags & MMU_RAM_MIXED_MASK;
+       e->pgsz         = pgsz;
+       e->endian       = MMU_RAM_ENDIAN_LITTLE;
+       e->elsz         = MMU_RAM_ELSZ_8;
+       e->mixed        = 0;
 
        return iopgsz_to_bytes(e->pgsz);
 }
@@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
        struct device *dev = oiommu->dev;
        struct iotlb_entry e;
        int omap_pgsz;
-       u32 ret, flags;
+       u32 ret;
 
-       /* we only support mapping a single iommu page for now */
        omap_pgsz = bytes_to_iopgsz(bytes);
        if (omap_pgsz < 0) {
                dev_err(dev, "invalid size to map: %d\n", bytes);
@@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 
        dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
 
-       flags = omap_pgsz | prot;
-
-       iotlb_init_entry(&e, da, pa, flags);
+       iotlb_init_entry(&e, da, pa, omap_pgsz);
 
        ret = omap_iopgtable_store_entry(oiommu, &e);
        if (ret)
@@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
        return ret;
 }
 
-static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
-                                   unsigned long cap)
-{
-       return 0;
-}
-
 static int omap_iommu_add_device(struct device *dev)
 {
        struct omap_iommu_arch_data *arch_data;
@@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = {
        .map            = omap_iommu_map,
        .unmap          = omap_iommu_unmap,
        .iova_to_phys   = omap_iommu_iova_to_phys,
-       .domain_has_cap = omap_iommu_domain_has_cap,
        .add_device     = omap_iommu_add_device,
        .remove_device  = omap_iommu_remove_device,
        .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
index b6f9a51746caad5956442e7d99a1b5450cf72f0b..f891683e3f05af915738151a1a7574268fe84b3b 100644 (file)
@@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /* to find an entry in the second-level page table. */
 #define iopte_index(da)                (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
-
-#define to_iommu(dev)                                                  \
-       (platform_get_drvdata(to_platform_device(dev)))
index e3bc2e19b6dd8f2cd7bd91b30120ecbba477abe3..bd97adecb1fd28fb1419db63c261a1fadd7cf4f9 100644 (file)
@@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev)
        struct resource *res;
        struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "cannot get platform resources\n");
-               return -ENOENT;
-       }
        ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
        if (!ipmmu) {
                dev_err(&pdev->dev, "cannot allocate device data\n");
@@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev)
        }
        spin_lock_init(&ipmmu->flush_lock);
        ipmmu->dev = &pdev->dev;
-       ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
-                                               resource_size(res));
-       if (!ipmmu->ipmmu_base) {
-               dev_err(&pdev->dev, "ioremap_nocache failed\n");
-               return -ENOMEM;
-       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ipmmu->ipmmu_base))
+               return PTR_ERR(ipmmu->ipmmu_base);
+
        ipmmu->dev_names = pdata->dev_names;
        ipmmu->num_dev_names = pdata->num_dev_names;
        platform_set_drvdata(pdev, ipmmu);
        ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
        ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
-       ipmmu_iommu_init(ipmmu);
-       return 0;
+       return ipmmu_iommu_init(ipmmu);
 }
 
 static struct platform_driver ipmmu_driver = {
index 9f69e818b0009db7881b3f8c862393836e5a604b..93580a47cc548851a36d1ff4cb45e88636565a3e 100644 (file)
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
 {
        struct learning_pkt pkt;
        struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->dev = slave->dev;
 
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
 
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
        /* fasten the change in the switch */
        if (SLAVE_IS_OK(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
        }
 
        if (SLAVE_IS_OK(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
+
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating curr_active_slave, use all current
+                        * user mac addresses (!strict_match).  Otherwise,
+                        * only use the mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
 
        write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
index 69aff72c895716fe6c579d2bf7f46c79ddca2a36..d3a67896d43541f0b8ebc0b4e193db945087bc56 100644 (file)
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
+       int i;
 
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
 
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
        arp_xmit(skb);
 }
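
The backwards loop above mirrors how stacked VLAN headers must be built: inner
tags are pushed first and the outer tag (tags[0]) last. A tiny userspace model
of the ordering, where printf() stands in for __vlan_put_tag()/vlan_put_tag():

	#include <stdio.h>

	#define BOND_MAX_VLAN_ENCAP 2

	int main(void)
	{
		unsigned short tags[BOND_MAX_VLAN_ENCAP] = { 10, 20 }; /* [0] = outer */
		int i;

		for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--)
			if (tags[i])
				printf("push inner tag %u\n", tags[i]);
		if (tags[0])
			printf("push outer tag %u\n", tags[0]);
		return 0;
	}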
 
+/* Validate the device path between @start_dev and @end_dev.
+ * The path is valid if @end_dev is reachable through device
+ * stacking.
+ * While the path is being validated, collect any vlan information
+ * along the path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+{
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+
+       return false;
+}
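
bond_verify_device_path() records tags on the way back out of the recursion,
which is what makes tags[0] the outermost tag no matter how deep the stacking
goes. A self-contained userspace model of that walk (struct dev, verify_path()
and the single-upper simplification are all illustrative assumptions):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_ENCAP 2

	struct dev {
		const char *name;
		bool is_vlan;
		int encap_level;	/* 0 = outer vlan, 1 = vlan-in-vlan */
		unsigned short vid;
		struct dev *upper;	/* single upper dev, for simplicity */
	};

	static bool verify_path(struct dev *start, struct dev *end,
				unsigned short tags[MAX_ENCAP])
	{
		if (start == end)
			return true;
		if (!start->upper || !verify_path(start->upper, end, tags))
			return false;
		if (start->upper->is_vlan && start->upper->encap_level < MAX_ENCAP)
			tags[start->upper->encap_level] = start->upper->vid;
		return true;
	}

	int main(void)
	{
		struct dev qinq = { "vlan.20", true, 1, 20, NULL };
		struct dev vlan = { "vlan.10", true, 0, 10, &qinq };
		struct dev bond = { "bond0", false, 0, 0, &vlan };
		unsigned short tags[MAX_ENCAP] = { 0 };

		if (verify_path(&bond, &qinq, tags))
			printf("outer vid %u, inner vid %u\n", tags[0], tags[1]);
		return 0;
	}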
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
 
+               if (ret)
+                       goto found;
+
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
 }
 
index 724e30fa20b9fa70166b5d9b25ed9029fab6db73..832070298446458483cdf1132cc46b63ba323a7d 100644 (file)
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
 };
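
The added { NULL, -1, 0} entry terminates the table: the bond option code
walks these tables until it finds a NULL string, so the missing sentinel let
the walk run past the end of bond_intmax_tbl. A minimal model of
sentinel-terminated iteration (this two-field opt_value is a simplified copy,
not the driver's struct):

	#include <stdio.h>

	struct opt_value {
		const char *string;
		int value;
	};

	static const struct opt_value intmax_tbl[] = {
		{ "off",    0 },
		{ "maxval", 2147483647 },
		{ NULL,    -1 },	/* the sentinel this fix adds */
	};

	int main(void)
	{
		const struct opt_value *v;

		for (v = intmax_tbl; v->string; v++) /* stops at the sentinel */
			printf("%s = %d\n", v->string, v->value);
		return 0;
	}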
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
index b8bdd0acc8f334ac97bca2ddfea602f473c3272f..00bea320e3b50c1eaa75a712f7afbbe65ac146fb 100644 (file)
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
index 8ab7103d4f44ea616ae8cb945eda8ea84aa3059b..61ffc12d8fd8e4e01056b06fd3ae73be7abe513e 100644 (file)
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
          SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
          boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-       bool "Force a strict RX CAN frame order (may cause frame loss)"
-       ---help---
-         The RX split buffer prevents packet reordering but can cause packet
-         loss. Only enable this option when you accept to lose CAN frames
-         in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
        tristate "Generic PCI Bus based C_CAN/D_CAN driver"
        depends on PCI
index a2ca820b5373841d2083d8d5de02fa9fce6c0ecf..95e04e2002daec774d394d0f0912ca7e952f280f 100644 (file)
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
-#endif
                c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
-#endif
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
-#endif
        return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
index c540e3d12e3d826260dbb590e8045c92fa4efb2b..564933ae218c78848dfba1e166f219e9de994e79 100644 (file)
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
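
Both peak_pci hunks cure the same use-after-free: chan->prev_dev was read
through a pointer that free_sja1000dev() had already released. The rule,
modeled below in plain C, is to save the cursor before freeing the node
(free_all() and struct node are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int id;
		struct node *prev;
	};

	static void free_all(struct node *n)
	{
		while (n) {
			struct node *prev = n->prev; /* read BEFORE freeing */

			printf("freeing node %d\n", n->id);
			free(n);
			n = prev;	/* never touch n after free(n) */
		}
	}

	int main(void)
	{
		struct node *a = calloc(1, sizeof(*a));
		struct node *b = calloc(1, sizeof(*b));

		if (!a || !b)
			return 1;
		b->id = 1;
		b->prev = a;
		free_all(b);
		return 0;
	}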
 
        /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
index 39b26fe28d1051ff916faceb747da7a64dac711f..d7401017a3f10940f3a662bebc555d835be3ce4b 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for the EtherCAT master module located on the CCAT FPGA,
+         which can be found on the Beckhoff CX5020 and possibly other
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3b9cb422b2fefa7122b074cd869a9085c2..35190e36c4568e6803279f878a6aa866685ca1be 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index d4a187e453698bbe96589921f97d5ea38bc3d5fb..3eff2fd3997e36ef128983fcaf080f595a693715 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
index 4d1f2fdd5c3275c3c9952cd118bda99b544ebd55..0fb986ba32905a5dac78c926333c9765ec4678de 100644 (file)
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+               msgdma_csroffs(status));
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&rxcsr->status,
+               if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Rx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
        /* Reset Tx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+               msgdma_csroffs(status));
+
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&txcsr->status,
+               if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Tx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-       iowrite32(0, &desc->write_addr_lo);
-       iowrite32(0, &desc->write_addr_hi);
-       iowrite32(buffer->len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_lo));
+       csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_hi));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+       csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+               msgdma_descroffs(stride));
+       csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+               msgdma_descroffs(control));
        return 0;
 }
 
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
-       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+       inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+                       & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
                ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        } else {
                /* Check for buffered last packet */
-               status = ioread32(&txcsr->status);
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
                if (status & MSGDMA_CSR_STAT_BUSY)
                        ready = priv->tx_prod - priv->tx_cons - 1;
                else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
-       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
        u32 len = priv->rx_dma_buf_sz;
        dma_addr_t dma_addr = rxbuffer->dma_addr;
        u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        | MSGDMA_DESC_CTL_TR_ERR_IRQ
                        | MSGDMA_DESC_CTL_GO);
 
-       iowrite32(0, &desc->read_addr_lo);
-       iowrite32(0, &desc->read_addr_hi);
-       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-       iowrite32(len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(0x00010001, &desc->stride);
-       iowrite32(control, &desc->control);
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+       csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_lo));
+       csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_hi));
+       csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+       csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
-
-       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-               pktlength = ioread32(&rxresp->bytes_transferred);
-               pktstatus = ioread32(&rxresp->status);
+
+       if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+           & 0xffff) {
+               pktlength = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(bytes_transferred));
+               pktstatus = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(status));
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
index d7b59ba4019c1fe1914b2b5c0643e464fcdff62d..e335626e1b6b5288c4f3d4cc8f45d79dcd56655d 100644 (file)
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-       u32 read_addr;  /* data buffer source address */
-       u32 write_addr; /* data buffer destination address */
-       u32 len;        /* the number of bytes to transfer per descriptor */
-       u32 control;    /* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
        u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
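
These offsetof() helpers are the crux of the conversion: instead of casting the
__iomem base to a packed struct and dereferencing fields, callers pass the raw
base plus a field offset. The before/after shape, using two lines lifted from
the hunks above purely for illustration:

	/* before: dereference a typed struct through __iomem space */
	status = ioread32(&txcsr->status);

	/* after: untyped base pointer plus an offsetof()-derived offset */
	status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
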
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM BIT(8)
index 9ce8630692b6e7c3a380745ec77afe0e005f1723..99cc56f451cf4886fbf0a5bca044dfa5cf033a2c 100644 (file)
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
 
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
 
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        }
 
        /* Initialize descriptor memory to all 0's, sync memory to cache */
-       memset(priv->tx_dma_desc, 0, priv->txdescmem);
-       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);
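
Two conventions in the hunks above deserve a note: descriptor memory is now
treated as __iomem space, so it is cleared with memset_io() rather than
memset(), and the (void __force *) casts drop the address-space annotation only
where the buffer is handed to the streaming-DMA API, which takes a plain kernel
pointer. A minimal sketch of the pattern, using only names from this patch:

	void __iomem *descs = priv->tx_dma_desc;	/* device memory */

	memset_io(descs, 0, priv->txdescmem);		/* not memset() */
	priv->txdescphys = dma_map_single(priv->device, (void __force *)descs,
					  priv->txdescmem, DMA_TO_DEVICE);
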
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
 
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
 
-       u32 sts = ioread32(&csr->status);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);
 
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
                if (rxstatus) {
-                       desc->status = 0;
+                       csrwr8(0, desc, sgdma_descroffs(status));
 
                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                                            "sgdma rx and rx queue empty!\n");
 
                        /* Clear control */
-                       iowrite32(0, &csr->control);
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
-                       iowrite32(0xf, &csr->status);
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
                        /* kick the rx sgdma after reaping this descriptor */
-                       pktsrx = sgdma_async_read(priv);
+                       sgdma_async_read(priv);
 
                } else {
                        /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
-                                  sts, desc->status, rxstatus);
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
                }
        } else if (sts == 0) {
-               pktsrx = sgdma_async_read(priv);
+               sgdma_async_read(priv);
        }
 
        return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
 
        /* Channel is implicitly zero, initialized to 0 by default */
-
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        struct tse_buffer *rxbuffer = NULL;
 
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);
 
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
 
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
 
                return 1;
        }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
        if (sgdma_txbusy(priv))
                return 0;
 
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
 
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
 
        return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
        /* if DMA is busy, wait for current transaction to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
 
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
index ba3334f353836cd0441d537ba5fda8a28bea171e..85bc33b218d946f557647d1a73f9699e3189827b 100644 (file)
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-       unsigned int    raddr; /* address of data to be read */
-       unsigned int    pad1;
-       unsigned int    waddr;
-       unsigned int    pad2;
-       unsigned int    next;
-       unsigned int    pad3;
-       unsigned short  bytes;
-       unsigned char   rburst;
-       unsigned char   wburst;
-       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+       u32     raddr; /* address of data to be read */
+       u32     pad1;
+       u32     waddr;
+       u32     pad2;
+       u32     next;
+       u32     pad3;
+       u16     bytes;
+       u8      rburst;
+       u8      wburst;
+       u16     bytes_xferred;  /* 16 bits, bytes xferred */
 
        /* bit 0: error
         * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
-       unsigned char   status;
+       u8      status;
 
        /* bit 0: eop
         * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
-       unsigned char   control;
+       u8      control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
        u32     pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR       BIT(0) /* Error */
 #define SGDMA_STSREG_EOP       BIT(1) /* EOP */
index 465c4aabebbd49d299cb266544c3469acd0f3b6b..2adb24d4523c915d3b7d87f1294ead36757cea50 100644 (file)
@@ -357,6 +357,8 @@ struct altera_tse_mac {
        u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
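
The accessors are deliberately width-explicit (32/16/8 bit) so that the packed
sub-word descriptor fields (see the sgdma_descrip changes above) can be
accessed without read-modify-write surprises. A hypothetical caller, composed
only of names introduced by this patch:

	/* read-modify-write of the MAC command_config register */
	u32 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	cmd |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
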
index 76133caffa78eb3b1e385525fdfc7fda142e77e1..54c25eff795272661e2caabfd554ffcc7dd24e00 100644 (file)
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
 
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
 
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
 
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
 
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
 
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
 
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
 
        /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs->version = 1;
 
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index e44a4aeb970142a6622d77c52b30a27c58a6e786..7330681574d20ccd02855beabedc9b01c8ae2b48 100644 (file)
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-       u32 data;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* get the data */
-       data = ioread32(&mdio_regs[regnum]) & 0xffff;
-       return data;
+       return csrrd32(priv->mac_dev,
+                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* write the data */
-       iowrite32((u32) value, &mdio_regs[regnum]);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
        return 0;
 }
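
Both MDIO hooks now encode the same addressing rule: PHY register N lives N
32-bit words past the mdio_phy0 block in the MAC register file. A hypothetical
helper capturing just that rule (not part of this patch):

	static u32 tse_mdio_readreg(struct altera_tse_private *priv, int regnum)
	{
		return csrrd32(priv->mac_dev,
			       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
	}
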
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio->irq[i] = PHY_POLL;
 
-       mdio->priv = priv->mac_dev;
+       mdio->priv = dev;
        mdio->parent = priv->device;
 
        ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;
-       int txcomplete = 0;
 
        spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(priv->device, buffer->dma_addr,
                                   buffer->len, DMA_TO_DEVICE);
 
-       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+       priv->dmaops->tx_buffer(priv, buffer);
 
        skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-       int ret;
 
        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
                        netdev_err(dev, "Could not attach to PHY\n");
 
        } else {
+               int ret;
                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        u32 msb;
        u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
        /* Set primary MAC address */
-       iowrite32(msb, &mac->mac_addr_0);
-       iowrite32(lsb, &mac->mac_addr_1);
+       csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+       csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
        int counter;
        u32 dat;
 
-       dat = ioread32(cmd_cfg_reg);
+       dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-       iowrite32(dat, cmd_cfg_reg);
+       csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+               if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+                                    MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }
 
        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               dat = ioread32(cmd_cfg_reg);
+               dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
-               iowrite32(dat, cmd_cfg_reg);
+               csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        unsigned int cmd = 0;
        u32 frm_length;
 
        /* Setup Rx FIFO */
-       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-                 &mac->rx_section_empty);
-       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+       csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(rx_section_empty));
+
+       csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(rx_section_full));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(rx_almost_empty));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(rx_almost_full));
 
        /* Setup Tx FIFO */
-       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-                 &mac->tx_section_empty);
-       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+       csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(tx_section_empty));
+
+       csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(tx_section_full));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(tx_almost_empty));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(tx_almost_full));
 
        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);
 
        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-       iowrite32(frm_length, &mac->frm_length);
-       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+       csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+       csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+               tse_csroffs(tx_ipg_length));
 
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+       tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+                   ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+       tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+                     ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                     ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
-       cmd = ioread32(&mac->command_config);
+       cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
        cmd &= ~MAC_CMDCFG_ETH_SPEED;
        cmd &= ~MAC_CMDCFG_ENA_10;
 
-       iowrite32(cmd, &mac->command_config);
+       csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-       iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+       csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+               tse_csroffs(pause_quanta));
 
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
-       u32 value = ioread32(&mac->command_config);
+       u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-       iowrite32(value, &mac->command_config);
+       csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
        struct netdev_hw_addr *ha;
 
        /* clear the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(0, &(mac->hash_table[i]));
+               csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
                        hash = (hash << 1) | xor_bit;
                }
-               iowrite32(1, &(mac->hash_table[hash]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
 }
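
The inner loop of the hash computed above is elided by the diff context; a
plausible reconstruction (an assumption for illustration, variable names
hypothetical) is that each of the six bucket bits is the parity of one address
octet:

	unsigned char octet = ha->addr[mac_octet];
	unsigned char xor_bit = 0;
	int bitshift;

	for (bitshift = 0; bitshift < 8; bitshift++)
		xor_bit ^= (octet >> bitshift) & 0x01;

	hash = (hash << 1) | xor_bit;
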
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
 
        /* set the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(1, &(mac->hash_table[i]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if (dev->flags & IFF_PROMISC)
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
 
        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
        else
-               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+                             MAC_CMDCFG_PROMIS_EN);
 
        spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");
 
+       /* Force the hash filter off for now, until the
+        * multicast filter receive issue is debugged
+        */
+       priv->hash_filter = 0;
+
        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
        .start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
index 70fa13f486b2fc4ef1d6fc45bb82977d13ea1930..d7eeb1713ad2b85721a533fbcc469d22db5740ea 100644 (file)
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value |= bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value &= ~bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 0 : 1;
 }
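
With the new offset argument, the helpers compose directly with the offsetof()
macros added earlier. One caller from this same series, repeated here as a
usage example:

	/* ack the SGDMA rx interrupt by setting the CLRINT control bit */
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
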
index ce1db36d35832a974f9f958f0b57c298343c0967..baf100ccf5872c7c18ac365ddfe314308b78c9b5 100644 (file)
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
index b260913db23609ba34daf1f0e7d2165a7c2cc867..3b0d43154e677bfe8fd1f66aac7d3c0a6afbcfd5 100644 (file)
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
index 81cc2d9831c2192edcf414fa90dd4aa5962285aa..b8078d50261bf0944f456acc466c73c138b03306 100644 (file)
@@ -2695,7 +2695,7 @@ out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
-       return 0;
+       return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
index 0c067e8564dd4e15e43a7e22f3e064091234b99a..784c7155b98a1977f2b809efe4ee5a45b23dc44b 100644 (file)
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644 (file)
index 0000000..4884205
--- /dev/null
@@ -0,0 +1,706 @@
+/*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
+ * These can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static const struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void __iomem *io;
+       void __iomem *dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void __iomem *ec_io;
+       void __iomem *fifo_io;
+       void __iomem *mii_io;
+       void __iomem *mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate a skb_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
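+/* The driver polls rather than using an interrupt; this callback is
+ * presumably armed when the interface is opened, along the lines of the
+ * following sketch (ec_bhf_open is outside this excerpt):
+ *
+ *	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	priv->hrtimer.function = ec_bhf_timer_fun;
+ *	hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+ *		      HRTIMER_MODE_REL);
+ */
+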
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned block_count, i;
+       void __iomem *ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that updates to tx_dnext are visible to
+                * the timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - at most 2^mask bytes in size
+        * To guarantee both, we allocate a buffer of
+        * 2 * 2^mask bytes and carve an aligned window out of it.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_phys);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void __iomem *dma_io;
+       void __iomem *io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (!net_dev) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
index a18645407d2152b43353a50b76ccf6317ef90151..dc19bc5dec7732c02220e009ad08ffc393aa1c2c 100644 (file)
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
+
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
index b0c6050479eb460ae306cccaa93926f738e64c2e..b78378cea5e39b6e18627b9f39ac5249c40293fd 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = 0 ; j < count ; j++) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+                               ctxbi->mapping = 0;
+                               ctxbi->len = 0;
+       }
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, i);
+                       goto out;
+               }
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, i);
+
+out:
+       return ret;
 
 }
 
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_OK;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
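
For reference, the map-then-unwind idiom this hunk introduces, reduced to its
essentials (a sketch; struct frag_map and map_all_frags() are illustrative
names, not part of jme):

	#include <linux/pci.h>

	struct frag_map {		/* illustrative container */
		struct page *page;
		unsigned long off;
		size_t len;
		dma_addr_t dma;
	};

	static int map_all_frags(struct pci_dev *pdev,
				 struct frag_map *f, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			f[i].dma = pci_map_page(pdev, f[i].page, f[i].off,
						f[i].len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(pdev, f[i].dma)) {
				while (--i >= 0)	/* unwind earlier maps */
					pci_unmap_page(pdev, f[i].dma,
						       f[i].len,
						       PCI_DMA_TODEVICE);
				return -EINVAL;
			}
		}
		return 0;
	}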
index 78099eab767374319c7e258bfa1f0d6df4c64fa3..92d3249f63f19a71b5ab9ed2e661cb4a8048134a 100644 (file)
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
index f9c46510196341a6089b0a23d7b53455dad69ae5..212cea440f90c73424d777f94c71fe61a0c48543 100644 (file)
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
index 61d64ebffd56e64b0fa8bf2d0fb69308e3d02c49..fbd32af89c7c0c2b94a36faec71f4e004048050d 100644 (file)
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
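
On the native side, a caller that only needs to repoint a QP's source MAC
would drive the new helper roughly as follows (a sketch, given a struct
mlx4_dev *dev and struct mlx4_qp *qp in scope; new_smac_index is a
placeholder for a valid index into the port's MAC table):

	struct mlx4_update_qp_params params = { };
	int err;

	params.smac_index = new_smac_index;	/* placeholder index */
	err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
	if (err)
		mlx4_err(dev, "updating SMAC of QP 0x%x failed: %d\n",
			 qp->qpn, err);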
index 1c3fdd4a1f7df3fe84847ae7ff278d652db13463..8f1254a79832c24b96b227de0c30ba3ed0116520 100644 (file)
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+{
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
+
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+
+err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
index 7b52a88923ef2e53fadf9aca0185e2af492aa7be..f785d01c7d123dde875a774ebf1027800af33425 100644 (file)
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
index 0bc914859e38be2c52e070edb82061fa2a6a1e48..7e55e88a81bf26ede0928225fa85998254280195 100644 (file)
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change the number of Tx/Rx rings
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
index 32d969e857f7befc79bf4a6f18cb153c350b374b..89b83e59e1dc601898ddd60bd0fa704fdd7b6d43 100644 (file)
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-       /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx)
-               free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-       /* Disable legacy interrupt */
-       if (efx->legacy_irq)
+       if (EFX_INT_MODE_USE_MSI(efx)) {
+               /* Disable MSI/MSI-X interrupts */
+               efx_for_each_channel(channel, efx)
+                       free_irq(channel->irq,
+                                &efx->msi_context[channel->channel]);
+       } else {
+               /* Disable legacy interrupt */
                free_irq(efx->legacy_irq, efx);
+       }
 }
 
 /* Register dump */
index d940034acdd4aa465153f80ae0d5881cb648d6a8..0f4841d2e8dc9b556bde072a1786697ea7041460 100644 (file)
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
-phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
 
        return ret;
 }
index df8d383acf48ed0da087bb19d144499484ac0376..b9ac20f42651bd90e33e5fcb3da38db5401117a2 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 36aa109416c4c3a387a440795a8d3611cf3d4ded..c331b7ebc8124585f989d6fb3dcdbaf8a3d7d3ed 100644 (file)
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
index b0e2865a6810782e115d61287ed23578646a855e..d53e299ae1d97ca39c4f3af511b97c5dacb32827 100644 (file)
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 9c4defdec67b09299f38f1b06bf8eacbccd007d1..5f1a2250018fec5ba01a2a1a1a11f2d736f544a9 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index a972056b22498c15596a88f6b348e0f066ddb85b..3bc079a67a3dc85a222e2b784f7f65ec55dc6b59 100644 (file)
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       int needs_aneg = 0, do_suspend = 0;
+       bool needs_aneg = false, do_suspend = false, do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
        case PHY_PENDING:
                break;
        case PHY_UP:
-               needs_aneg = 1;
+               needs_aneg = true;
 
                phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->adjust_link(phydev->attached_dev);
 
                } else if (0 == phydev->link_timeout--)
-                       needs_aneg = 1;
+                       needs_aneg = true;
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
-                               needs_aneg = 1;
+                               needs_aneg = true;
                }
 
                phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
-                       do_suspend = 1;
+                       do_suspend = true;
                }
                break;
        case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
+               do_resume = true;
                break;
        }
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
        if (needs_aneg)
                err = phy_start_aneg(phydev);
-
-       if (do_suspend)
+       else if (do_suspend)
                phy_suspend(phydev);
+       else if (do_resume)
+               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
index 0ce606624296a80492b18d89a27634614aad5497..4987a1c6dc52e63220edb6fc94eea925a9fdf122 100644 (file)
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
 
        return err;
 }
index c9f3281506af568e534a47789418b466a0a77adc..2e025ddcef210bc888dd8a8ff81473a32c3dd154 100644 (file)
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
                /* mapping VLANs to MBIM sessions:
                 *   no tag     => IPS session <0>
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
                 *   512 - 4095 => unsupported, drop
                 */
-               vlan_get_tag(skb, &tci);
-
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
+       rcu_read_lock();
+       if (tci) {
                netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
                                              tci);
-       else
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
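
The VLAN-to-session mapping in cdc_mbim_tx_fixup() above boils down to
patching the low byte of the NDP16 signature. A standalone sketch of the IPS
case (the constant mirrors USB_CDC_MBIM_NDP16_IPS_SIGN; run on a
little-endian host):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t sign = 0x00535049;	/* "IPS\0" as a LE u32 */
		uint16_t tci = 42;	/* VLAN 1-255 => IPS session <tci> */
		uint8_t *c = (uint8_t *)&sign;

		c[3] = tci;	/* session id lives in the last byte */
		printf("NDP16 signature: 0x%08x\n", sign);
		return 0;
	}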
index f46cd0250e488217ca4aa517be12924e3b61c6a8..5627917c5ff761137061467e1b9d71f281ce0652 100644 (file)
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_AP ||
             vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           bss_conf->enable_beacon)
+           bss_conf->enable_beacon) {
                priv->reconfig_beacon = true;
+               priv->rearm_ani = true;
+       }
 
        if (bss_conf->assoc) {
                priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        ath9k_htc_ps_wakeup(priv);
 
+       ath9k_htc_stop_ani(priv);
        del_timer_sync(&priv->tx.cleanup_timer);
        ath9k_htc_tx_drain(priv);
 
index afb3d15e38ff0379a99c5e2c534be23c57b94e38..be1985296bdc75e725cdc161aa4101cea4a19476 100644 (file)
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
        if (!err) {
                /* only set 2G bandwidth using bw_cap command */
                band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
                err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
                                               sizeof(band_bwcap));
        } else {
index fa858d548d13c0bd794b98dc4da2053893b460dc..0489314425cbdf4a4b782867644ee9a0a0ca4b63 100644 (file)
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
 
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
 
        if (mvm->cfg->bt_shared_single_ant)
index 9426905de6b283dc0230cf51d5a694478da7797a..d73a89ecd78aa0963c40a268fc7dd8e3d7fbe29d 100644 (file)
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *     this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in usecs (suspend before executing)
+ *     bits 0-19: beacon interval in TUs (suspend before executing)
  *     bits 20-23: reserved
  *     bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max uSec to be out of assoceated channel
- * @suspend_time:      pause scan this long when returning to service channel
+ * @max_out_time:      max TUs to be out of associated channel
+ * @suspend_time:      pause scan for this many TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
  * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
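
The usecs-to-TUs relabelling above matters because 1 TU is 1024 microseconds
in 802.11; mac80211's ieee80211_tu_to_usec() is the same multiply, and the
scan.c hunk later in this series plugs in 100/600 TU directly. A quick sanity
check:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tu_to_usec(uint32_t tu) { return tu * 1024; }

	int main(void)
	{
		printf("100 TU = %u usec\n", tu_to_usec(100)); /* 102400 */
		printf("600 TU = %u usec\n", tu_to_usec(600)); /* 614400 */
		return 0;
	}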
index f0cebf12c7b8415a3c787d0cc77a9b2b1c2a15ef..b41dc84e9431e6c625d55a28d5cd3ca7a885cf08 100644 (file)
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
 
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
index d564233a65da6157c1aaf16a099ddf94b3be933e..f1ec0986c3c912865f0d51e1056ec03c786d8cc2 100644 (file)
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
        return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
index 9f52c5b3f0ec0e9b2da5949f2af88bbcd13d89ce..e1c838899363b373d176a71bc32d02282d56c018 100644 (file)
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                return;
        }
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
index c91dc8498852c46653cc43fddb57c382d3d7f3f0..c28de54c75d400d551a0be87d4501cc98fb5967a 100644 (file)
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
 
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
 
+not_bound:
+
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
index d619851745a19ba6d3bf605555fcdbd5a09f8341..2180902266ae6636513346df888c9d68abf0a57e 100644 (file)
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
        return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+}
index dcfd6d866d095081d7001795c4ec802c3044926f..2365553f1ef79d59c3e8598335e48eb43e5cfdcb 100644 (file)
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        }
 
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_pci_disable_msi;
        }
 
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_free_ict;
        }
 
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
 
 out_free_ict:
index 630a3fcf65bc8113fd6b67ce587f26fbf46c9774..0d4a285cbd7edb45408360a83952ab288debf62d 100644 (file)
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
                              grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
index ef05c5c49d413d5bb23a3e4adbff88cb0c9ad7cd..20e9defa10606d7d54a0a5412eb93365ce748f5c 100644 (file)
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(vif, budget);
 
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-
-               local_irq_save(flags);
-
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
 
        return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
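
xenvif_napi_schedule_or_enable_events() (introduced elsewhere in this patch)
centralizes the complete-then-recheck idiom that replaces the open-coded
__napi_complete() dance above. The general shape only, with illustrative
names and stubbed helpers, not the actual body:

	#include <linux/netdevice.h>

	struct my_dev {				/* illustrative */
		struct napi_struct napi;
	};

	static bool ring_has_unconsumed_requests(struct my_dev *d); /* stub */
	static void enable_frontend_events(struct my_dev *d);	    /* stub */

	static void my_schedule_or_enable_events(struct my_dev *d)
	{
		/* If requests slipped in between the last ring check and
		 * napi_complete(), poll again; otherwise re-arm event
		 * delivery so the frontend can wake us.
		 */
		if (ring_has_unconsumed_requests(d))
			napi_schedule(&d->napi);
		else
			enable_frontend_events(d);
	}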
index 76665405c5aac32d57eeadf355007cbb2b50cbec..7367208ee8cdd8b324ce661b48aa69c1d884855b 100644 (file)
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        }
 }
 
+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+                                               const int i,
+                                               const struct ubuf_info *ubuf)
+{
+       struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+       do {
+               u16 pending_idx = ubuf->desc;
+
+               if (skb_shinfo(skb)->frags[i].page.p ==
+                   foreign_vif->mmap_pages[pending_idx])
+                       break;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+       } while (ubuf);
+
+       return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-       struct xenvif *foreign_vif = NULL;
+       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
-       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                (ubuf->callback == &xenvif_zerocopy_callback)) {
-               int i = 0;
-               foreign_vif = ubuf_to_vif(ubuf);
-
-               do {
-                       u16 pending_idx = ubuf->desc;
-                       foreign_grefs[i++] =
-                               foreign_vif->pending_tx_info[pending_idx].req.gref;
-                       ubuf = (struct ubuf_info *) ubuf->ctx;
-               } while (ubuf);
-       }
-
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        }
 
        for (i = 0; i < nr_frags; i++) {
+               /* This variable also signals whether foreign_gref has a real
+                * value or not.
+                */
+               struct xenvif *foreign_vif = NULL;
+               grant_ref_t foreign_gref;
+
+               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                       (ubuf->callback == &xenvif_zerocopy_callback)) {
+                       const struct ubuf_info *const startpoint = ubuf;
+
+                       /* Ideally ubuf points to the chain element which
+                        * belongs to this frag. Or if frags were removed from
+                        * the beginning, then shortly before it.
+                        */
+                       ubuf = xenvif_find_gref(skb, i, ubuf);
+
+                       /* Try again from the beginning of the list, if we
+                        * haven't tried from there. This only makes sense in
+                        * the unlikely event of reordering the original frags.
+                        * For injected local pages it's an unnecessary second
+                        * run.
+                        */
+                       if (unlikely(!ubuf) && startpoint != head_ubuf)
+                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+                       if (likely(ubuf)) {
+                               u16 pending_idx = ubuf->desc;
+
+                               foreign_vif = ubuf_to_vif(ubuf);
+                               foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+                               /* Just a safety measure. If this was the last
+                                * element on the list, the for loop will
+                                * iterate again if a local page was added to
+                                * the end. Using head_ubuf here prevents the
+                                * second search on the chain. Or the original
+                                * frags changed order, but that's less likely.
+                                * Either way, ubuf shouldn't be NULL.
+                                */
+                               ubuf = ubuf->ctx ?
+                                       (struct ubuf_info *) ubuf->ctx :
+                                       head_ubuf;
+                       } else
+                               /* This frag was a local page, added to the
+                                * array after the skb left netback.
+                                */
+                               ubuf = head_ubuf;
+               }
                xenvif_gop_frag_copy(vif, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head,
                                     foreign_vif,
-                                    foreign_grefs[i]);
+                                    foreign_vif ? foreign_gref : UINT_MAX);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
                notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
        int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
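
For context on the hunks above: each frag is matched against a linked chain of ubuf_info entries, where ->ctx links the chain and ->desc indexes the owning vif's mmap_pages. A minimal sketch mirroring xenvif_find_gref() (illustrative only, nothing beyond what the patch already does):

	static const struct ubuf_info *
	example_find_frag(const struct sk_buff *skb, int i,
			  const struct xenvif *vif, const struct ubuf_info *u)
	{
		while (u) {
			u16 idx = u->desc;	/* index into vif->mmap_pages */

			if (skb_shinfo(skb)->frags[i].page.p ==
			    vif->mmap_pages[idx])
				return u;	/* chain element owning frag i */
			u = (const struct ubuf_info *)u->ctx;	/* next link */
		}
		return NULL;	/* frag i is a local page, not foreign */
	}
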
index 6963bdf5417593921122694d8ae425ff7a599f7e..6aea373547f65f3743faa7236b5035a83e178966 100644 (file)
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index 1b681427dde045cdad8f2455c07467fba4f077f1..c341f855fadcd433a6730db0d941317615a7fe9e 100644 (file)
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
        list_del(&rphy->list);
        mutex_unlock(&sas_host->lock);
 
-       sas_bsg_remove(shost, rphy);
-
        transport_destroy_device(dev);
 
        put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
        }
 
        sas_rphy_unlink(rphy);
+       sas_bsg_remove(NULL, rphy);
        transport_remove_device(dev);
        device_del(dev);
 }
index 1c8c6cc6de3097ceab15a5a16307e0b569ee6a29..4b0eff6da6740552043679764f0ebc76a47917a0 100644 (file)
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
        _enter("");
 
+       /* Break the callbacks here so that we do it after the final ACK is
+        * received.  The step number here must match the final number in
+        * afs_deliver_cb_callback().
+        */
+       if (call->unmarshall == 6) {
+               ASSERT(call->server && call->count && call->request);
+               afs_break_callbacks(call->server, call->count, call->request);
+       }
+
        afs_put_server(call->server);
        call->server = NULL;
        kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("trailer");
                if (skb->len != 0)
                        return -EBADMSG;
+
+               /* Record that the message was unmarshalled successfully so
+                * that the call destructor knows to do the callback-breaking
+                * work, even if the final ACK isn't received.
+                *
+                * If the step number changes, then afs_cm_destructor() must be
+                * updated also.
+                */
+               call->unmarshall++;
+       case 6:
                break;
        }
 
index be75b500005d0d4f68b2d6e3855e71660016353d..590b55f46d61dd1ca169ff78a3dbf4f5d32b813c 100644 (file)
@@ -75,7 +75,7 @@ struct afs_call {
        const struct afs_call_type *type;       /* type of call */
        const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
-       work_func_t             async_workfn;
+       void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
        struct work_struct      async_work;     /* asynchronous work processor */
        struct work_struct      work;           /* actual work processor */
        struct sk_buff_head     rx_queue;       /* received packets */
index ef943df73b8cdee2c6964439b81417a9c1110b12..03a3beb170048df40c436dff51c41373104cf9e7 100644 (file)
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
 
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+       call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -183,6 +190,28 @@ static void afs_free_call(struct afs_call *call)
        kfree(call);
 }
 
+/*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+       if (call->rxcall) {
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+       }
+       if (call->type->destructor)
+               call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+       afs_end_call_nofree(call);
+       afs_free_call(call);
+}
+
 /*
  * allocate a call with flat request and reply buffers
  */
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
               atomic_read(&afs_outstanding_calls));
 
        call->wait_mode = wait_mode;
-       INIT_WORK(&call->async_work, afs_process_async_call);
+       call->async_workfn = afs_process_async_call;
+       INIT_WORK(&call->async_work, afs_async_workfn);
 
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
        while ((skb = skb_dequeue(&call->rx_queue)))
                afs_free_skb(skb);
-       rxrpc_kernel_end_call(rxcall);
-       call->rxcall = NULL;
 error_kill_call:
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
        if (call->state >= AFS_CALL_COMPLETE) {
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
-               if (call->incoming) {
-                       rxrpc_kernel_end_call(call->rxcall);
-                       call->rxcall = NULL;
-                       call->type->destructor(call);
-                       afs_free_call(call);
-               }
+               if (call->incoming)
+                       afs_end_call(call);
        }
 
        _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        }
 
        _debug("call complete");
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
                call->reply = NULL;
 
                /* kill the call */
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               if (call->type->destructor)
-                       call->type->destructor(call);
+               afs_end_call_nofree(call);
 
                /* we can't just delete the call because the work item may be
                 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
        call->reply_size += len;
 }
 
-static void afs_async_workfn(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        default:
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               call->type->destructor(call);
-               afs_free_call(call);
+               afs_end_call(call);
                _leave(" [error]");
                return;
        }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
        if (n >= 0) {
+               /* Success */
                _leave(" [replied]");
                return;
        }
+
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        }
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" [error]");
 }
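
The afs_async_workfn() change above is the usual container_of trampoline: one work_struct handler recovers the containing call and dispatches through a typed per-call function pointer, so the individual handlers no longer each repeat the container_of dance. A self-contained sketch of the pattern with generic names (not from the patch):

	#include <linux/workqueue.h>

	struct example_call {
		struct work_struct async_work;
		void (*async_workfn)(struct example_call *call);
	};

	static void example_trampoline(struct work_struct *work)
	{
		struct example_call *call =
			container_of(work, struct example_call, async_work);

		call->async_workfn(call);	/* typed dispatch */
	}
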
 
index b6f46013dddf26bc5306349a26160aa5c9450eb7..f66c66b9f18285a4084114679d0e1d3e555a253c 100644 (file)
@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
                add_to_mask(state, &state->groups->aces[i].perms);
        }
 
-       if (!state->users->n && !state->groups->n) {
+       if (state->users->n || state->groups->n) {
                pace++;
                pace->e_tag = ACL_MASK;
                low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
index 32b699bebb9c3e7281ba58e1d123dc6c2f0ce361..9a77a5a21557c4e16740196c16d7f4f218ce2643 100644 (file)
@@ -3717,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-       if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+       if (check_for_locks(stp->st_file, lo))
                return nfserr_locks_held;
-       release_lock_stateid(stp);
+       /*
+        * Currently there's a 1-1 lock stateid<->lockowner
+        * correspondence, and we have to delete the lockowner when we
+        * delete the lock stateid:
+        */
+       unhash_lockowner(lo);
        return nfs_ok;
 }
 
@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
 
        if (!same_owner_str(&lo->lo_owner, owner, clid))
                return false;
+       if (list_empty(&lo->lo_owner.so_stateids)) {
+               WARN_ON_ONCE(1);
+               return false;
+       }
        lst = list_first_entry(&lo->lo_owner.so_stateids,
                               struct nfs4_ol_stateid, st_perstateowner);
        return lst->st_file->fi_inode == inode;
index af3f7aa73e13a007d06fd528d8c8d101d2553087..ee1f88419cb0640d38203eb3a16c7b03094f7c56 100644 (file)
@@ -472,11 +472,15 @@ bail:
 
 void dlm_destroy_master_caches(void)
 {
-       if (dlm_lockname_cache)
+       if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
+               dlm_lockname_cache = NULL;
+       }
 
-       if (dlm_lockres_cache)
+       if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
+               dlm_lockres_cache = NULL;
+       }
 }
 
 static void dlm_lockres_release(struct kref *kref)
index 8300fb87b84ac1329d8facb1d560767c29c549d8..72cb0ddb9678d21eb2f2edd20a36b594b72160d9 100644 (file)
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+       u8 map_cnt;
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
index 7c8b20b120eac680f27e3cd1e99630b1fe521799..a9a53b12397b0c4fd29a11cbf5f9cbbd18944fd8 100644 (file)
@@ -56,6 +56,7 @@ struct macvlan_dev {
        int                     numqueues;
        netdev_features_t       tap_features;
        int                     minor;
+       int                     nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
index 13bbbde00e68de454c8cf30f0581796d55eb0a40..b2acc4a1b13c9bd906869bc52d501f53f67d3c87 100644 (file)
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
+       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
 
 extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                 */
                skb->protocol = htons(ETH_P_802_2);
 }
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index b66e7610d4eec9f4d67e5f8bbd745bb6cbd3c99a..7040dc98ff8baa59118cd42dd728d3152afb7c86 100644 (file)
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
        __be32                  byte_count;
 };
 
+enum mlx4_update_qp_attr {
+       MLX4_UPDATE_QP_SMAC             = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+       u8      smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
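
A hedged caller-side sketch of the new mlx4_update_qp() interface declared above; the call shape follows the declaration, while 'new_smac_index' and the surrounding error handling are assumptions:

	struct mlx4_update_qp_params params = {
		.smac_index = new_smac_index,	/* assumed to be in scope */
	};
	int err;

	err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
	if (err)
		return err;
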
index 94734a6259a4d9ee36e25b342c86d3c1bf9d5fcf..17d83393afcc4337d50f00cfa837030d9429c6f8 100644 (file)
@@ -248,24 +248,17 @@ do {                                                              \
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
 
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
-               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
 #define net_get_random_once(buf, nbytes)                               \
        ({                                                              \
                bool ___ret = false;                                    \
                static bool ___done = false;                            \
-               static struct static_key ___done_key =                  \
-                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
-               if (!static_key_true(&___done_key))                     \
+               static struct static_key ___once_key =                  \
+                       STATIC_KEY_INIT_TRUE;                           \
+               if (static_key_true(&___once_key))                      \
                        ___ret = __net_get_random_once(buf,             \
                                                       nbytes,          \
                                                       &___done,        \
-                                                      &___done_key);   \
+                                                      &___once_key);   \
                ___ret;                                                 \
        })
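
net_get_random_once() lazily initializes a per-boot secret on first use; the rework above only inverts how the static key patches the fast path out. An illustrative use, assumed rather than taken from the patch:

	static u32 example_hash_secret __read_mostly;

	net_get_random_once(&example_hash_secret, sizeof(example_hash_secret));
	/* the secret is now initialized, exactly once per boot */
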
 
index 7ed3a3aa6604b7b8511ea709ea2c2312a1d38bb2..b42d07b0390b2e95c3e551bb6639b906472bf70b 100644 (file)
@@ -1144,6 +1144,7 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+       spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
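
A hedged sketch of how a stacked driver might implement the new ndo_get_lock_subclass() hook so lockdep can distinguish addr_list_lock instances at different stacking depths; the priv layout and names are assumptions:

	struct example_priv {
		unsigned int nest_level;	/* assumed: set at register time */
	};

	static int example_get_lock_subclass(struct net_device *dev)
	{
		return ((struct example_priv *)netdev_priv(dev))->nest_level;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_get_lock_subclass	= example_get_lock_subclass,
	};
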
@@ -3050,9 +3056,18 @@ extern int               weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index 6fe8464ed767f0dac6481a6ffc7b39ecaebdd648..881a7c3571f4617b99e0845995800d98eb4f9349 100644 (file)
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
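
With the !CONFIG_OF stub above falling back to mdiobus_register(), drivers can call the DT variant unconditionally. A hedged caller-side sketch (the platform-device context is an assumption):

	/* works with or without CONFIG_OF; no #ifdef needed in the driver */
	err = of_mdiobus_register(bus, pdev->dev.of_node);
	if (err)
		goto out_free_bus;
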
index 3356abcfff184e707eccb08d6f99042a8fd51acc..3ef6ea12806a297107ff65b2ad554f5176c8e301 100644 (file)
@@ -402,6 +402,8 @@ struct perf_event {
 
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
+       unsigned long                   rcu_batches;
+       int                             rcu_pending;
 
        /* poll related */
        wait_queue_head_t               waitq;
index 8e3e66ac0a5215d221042e15e631fb2fe2fb51d1..953937ea5233c770d631bf3ae59be60b72630d88 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/wait.h>
 #include <uapi/linux/rtnetlink.h>
 
 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
 #else
index 25f54c79f75772a9f133c585e17a2d8e4a59e8ac..221b2bde372363765b5328638bf9320d9c95f5fd 100644 (file)
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED            512
 #define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
-        * exit the critical section).
+        * exit the critical section);
+        *
+        * @dl_yielded tells if task gave up the cpu before consuming
+        * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted;
+       int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index f3539a15c41103b743c0571913fbb93dc402f40a..f856e5a746fae66e2c91357cfe94fc89d0acae18 100644 (file)
@@ -3668,6 +3668,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
index 6c4f5eac98e7be133af4b868507f0d217ac05c1b..216cecce65e9e1200cd760de64c27ac852ffc85b 100644 (file)
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 
 
 /*
index 80f500a29498e1fc9b8892e5c66be6bd02362eaa..b2704fd0ec80d714bc9e7dd9b87721da1853173c 100644 (file)
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
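
The new ping_group_range wrapper pairs the gid range with a seqlock. A minimal sketch of the read side such a structure implies (illustrative; the helper name is an assumption):

	static void example_read_ping_group_range(struct net *net,
						  kgid_t *low, kgid_t *high)
	{
		struct ping_group_range *pgr = &net->ipv4.ping_group_range;
		unsigned int seq;

		do {
			seq = read_seqbegin(&pgr->lock);
			*low  = pgr->range[0];
			*high = pgr->range[1];
		} while (read_seqretry(&pgr->lock, seq));
	}
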
 
index 1ba9d626aa833db91c462560f27054b30e91939d..194c1eab04d8ad9cb37ae855d8c011d48722c829 100644 (file)
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
 *     to work properly to support receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ *     here to reserve the value for API/ABI compatibility)
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
index f83a71a3e46d75547e540ed317f99531838f408b..440eefc67397e48f15b58bc8cf31712bec91286b 100644 (file)
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+       struct perf_event *event;
+       bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-       struct perf_event *event = info;
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
+       if (re->detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
                ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
+       struct remove_event re = {
+               .event = event,
+               .detach_group = detach_group,
+       };
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               cpu_function_call(event->cpu, __perf_remove_from_context, event);
+               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }
 
 retry:
-       if (!task_function_call(task, __perf_remove_from_context, event))
+       if (!task_function_call(task, __perf_remove_from_context, &re))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
         * Since the task isn't running, it's safe to remove the event; our
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
+       if (detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
        unaccount_event(event);
 
        if (event->rb) {
-               struct ring_buffer *rb;
-
                /*
                 * Can happen when we close an event with re-directed output.
                 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
                 * over us; possibly making our ring_buffer_put() the last.
                 */
                mutex_lock(&event->mmap_mutex);
-               rb = event->rb;
-               if (rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* could be last */
-               }
+               ring_buffer_attach(event, NULL);
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
         *     to trigger the AB-BA case.
         */
        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-       raw_spin_lock_irq(&ctx->lock);
-       perf_group_detach(event);
-       raw_spin_unlock_irq(&ctx->lock);
-       perf_remove_from_context(event);
+       perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
        free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb)
 {
+       struct ring_buffer *old_rb = NULL;
        unsigned long flags;
 
-       if (!list_empty(&event->rb_entry))
-               return;
+       if (event->rb) {
+               /*
+                * Should be impossible, we set this when removing
+                * event->rb_entry and wait/clear when adding event->rb_entry.
+                */
+               WARN_ON_ONCE(event->rcu_pending);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       if (list_empty(&event->rb_entry))
-               list_add(&event->rb_entry, &rb->event_list);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+               old_rb = event->rb;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-       unsigned long flags;
+               spin_lock_irqsave(&old_rb->event_lock, flags);
+               list_del_rcu(&event->rb_entry);
+               spin_unlock_irqrestore(&old_rb->event_lock, flags);
+       }
 
-       if (list_empty(&event->rb_entry))
-               return;
+       if (event->rcu_pending && rb) {
+               cond_synchronize_rcu(event->rcu_batches);
+               event->rcu_pending = 0;
+       }
+
+       if (rb) {
+               spin_lock_irqsave(&rb->event_lock, flags);
+               list_add_rcu(&event->rb_entry, &rb->event_list);
+               spin_unlock_irqrestore(&rb->event_lock, flags);
+       }
+
+       rcu_assign_pointer(event->rb, rb);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_del_init(&event->rb_entry);
-       wake_up_all(&event->waitq);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb (so that we
+                * could attach the new rb), we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-               return;
+               goto out_put;
 
-       /* Detach current event from the buffer. */
-       rcu_assign_pointer(event->rb, NULL);
-       ring_buffer_detach(event, rb);
+       ring_buffer_attach(event, NULL);
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count)) {
-               ring_buffer_put(rb); /* can't be last */
-               return;
-       }
+       if (atomic_read(&rb->mmap_count))
+               goto out_put;
 
        /*
         * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
                 * still restart the iteration to make sure we're not now
                 * iterating the wrong list.
                 */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
+               if (event->rb == rb)
+                       ring_buffer_attach(event, NULL);
+
                mutex_unlock(&event->mmap_mutex);
                put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
        vma->vm_mm->pinned_vm -= mmap_locked;
        free_uid(mmap_user);
 
+out_put:
        ring_buffer_put(rb); /* could be last */
 }
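
The reworked ring_buffer_attach() above relies on the conditional grace-period API: snapshot the RCU state when the entry is removed, and only block later if no grace period has elapsed since the snapshot. A minimal generic sketch of that pairing (not perf-specific; 'obj' is assumed):

	unsigned long snap;

	snap = get_state_synchronize_rcu();	/* taken at removal time */
	list_del_rcu(&obj->node);
	/* ... possibly much later, before reusing obj->node ... */
	cond_synchronize_rcu(snap);	/* no-op if a GP already passed */
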
 
@@ -4124,7 +4143,6 @@ again:
        vma->vm_mm->pinned_vm += extra;
 
        ring_buffer_attach(event, rb);
-       rcu_assign_pointer(event->rb, rb);
 
        perf_event_init_userpage(event);
        perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Keeps track of cpu being initialized/exited */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL, *old_rb = NULL;
+       struct ring_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -6942,8 +6969,6 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
-       old_rb = event->rb;
-
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
                        goto unlock;
        }
 
-       if (old_rb)
-               ring_buffer_detach(event, old_rb);
-
-       if (rb)
-               ring_buffer_attach(event, rb);
-
-       rcu_assign_pointer(event->rb, rb);
-
-       if (old_rb) {
-               ring_buffer_put(old_rb);
-               /*
-                * Since we detached before setting the new rb, so that we
-                * could attach the new rb, we could have missed a wakeup.
-                * Provide it now.
-                */
-               wake_up_all(&event->waitq);
-       }
+       ring_buffer_attach(event, rb);
 
        ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
+       } else {
+               if (attr.sample_period & (1ULL << 63))
+                       return -EINVAL;
        }
 
        /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader, false);
 
                /*
                 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling);
+                       perf_remove_from_context(sibling, false);
                        perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock(&src_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event);
+               perf_remove_from_context(event, false);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       if (child_event->parent) {
-               raw_spin_lock_irq(&child_ctx->lock);
-               perf_group_detach(child_event);
-               raw_spin_unlock_irq(&child_ctx->lock);
-       }
-
-       perf_remove_from_context(child_event);
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
+       if (!parent_ctx)
+               return 0;
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+       struct remove_event re = { .detach_group = false };
        struct perf_event_context *ctx = __info;
-       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(event);
+       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(&re);
        rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
        perf_event_exit_cpu_context(cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
 }
index d9d8ece46a15885410a1bb138f4c6c7155573803..204d3d281809aa90686b8d4f0199c7d933710908 100644 (file)
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq, prev);
-               if (likely(p && p != RETRY_TASK))
-                       return p;
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
 again:
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_throttled = 0;
        dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        ,
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
        };
        SD_INIT_NAME(sd, NUMA);
        sd->private = &tl->data;
index 5b9bb42b2d47e760f3a7cd17af24ba37d2cf07ea..ab001b5d50487815da984947b592b26038cce794 100644 (file)
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-       /*
-        * nothing to do for the moment
-        */
+       free_cpumask_var(cp->free_cpus);
 }
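
The fix above pairs teardown with the allocation presumably done in cpudl_init(); as a reminder of the idiom:

	/* init */
	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
		return -ENOMEM;

	/* teardown: every *alloc_cpumask_var() needs a matching free */
	free_cpumask_var(cp->free_cpus);
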
index 8b836b376d9129760066326eabf5040f72b2e4f3..3031bac8aa3ea990bc7425e835675c7a5a386ab5 100644 (file)
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
        int idx = 0;
        int task_pri = convert_prio(p->prio);
 
-       if (task_pri >= MAX_RT_PRIO)
-               return 0;
+       BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
index a95097cb4591b5bfa2466adb5600895e782fc661..72fdf06ef8652d5cb443b080f53ac117bd5517ba 100644 (file)
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputime, scaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
index b08095786cb8fff0c96773eb5f6a582da503bcb8..800e99b99075141421d82f0bdc07e42f09baea9d 100644 (file)
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
-        * new scheduling parameters (thanks to dl_new=1).
+        * new scheduling parameters (thanks to dl_yielded=1).
         */
        if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_new = 1;
+               rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
index 7570dd969c2838e9aab12ba9cb2c24cb87e21855..0fdb96de81a5b8a92c302961769cdefdb5cad915 100644 (file)
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
        int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
+
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_lock(&this_rq->lock);
 
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
                this_rq->next_balance = next_balance;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
-
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
index 000a220e2a4142f8ed93b5e4c98125f9cee9c443..088358c8006bb9c109da188c2b5ccf4a91614114 100644 (file)
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
        /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+       if (test_bit(AS_ENOSPC, &mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
+       if (test_bit(AS_EIO, &mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
 }
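
The filemap_check_errors() change is a common SMP micro-optimization: a plain test_bit() read keeps the cacheline shared when the flag is clear (the common case), while the atomic test_and_clear_bit() still arbitrates between racing clearers when it is set. Generic shape of the idiom:

	if (test_bit(SOME_FLAG, &word) &&
	    test_and_clear_bit(SOME_FLAG, &word))
		handle_flag();	/* at most one racing clearer sees the bit */
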
index 539eeb96b323bf649f83783e0dddcb4f907e1d6e..a402f8fdc68e94888ea177104524085c9f490fd5 100644 (file)
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-               page = find_get_page(mapping, index);
+               page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
index c47dffdcb246b0cc3120d50d6dfff1a6cd0369ea..5177c6d4a2ddbf6d28ece095287c1a4a36ef9a2a 100644 (file)
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
        rcu_read_lock();
        do {
-               memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-               if (unlikely(!memcg))
+               /*
+                * Page cache insertions can happen without an
+                * actual mm context, e.g. during disk probing
+                * on boot, loopback IO, acct() writes etc.
+                */
+               if (unlikely(!mm))
                        memcg = root_mem_cgroup;
+               else {
+                       memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                       if (unlikely(!memcg))
+                               memcg = root_mem_cgroup;
+               }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                return 0;
        }
 
-       /*
-        * Page cache insertions can happen without an actual mm
-        * context, e.g. during disk probing on boot.
-        */
-       if (unlikely(!mm))
-               memcg = root_mem_cgroup;
-       else {
-               memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-               if (!memcg)
-                       return -ENOMEM;
-       }
+       memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+       if (!memcg)
+               return -ENOMEM;
        __mem_cgroup_commit_charge(memcg, page, 1, type, false);
        return 0;
 }
index 35ef28acf137c0ab76393ede3dbc1c3d820f5c37..9ccef39a9de261c96f4e5775d7dca48b63d4d133 100644 (file)
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
-                        * Check "just unpoisoned", "filter hit", and
-                        * "race with other subpage."
+                        * Check "filter hit" and "race with other subpage."
                         */
                        lock_page(hpage);
-                       if (!PageHWPoison(hpage)
-                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                           || (p != hpage && TestSetPageHWPoison(hpage))) {
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                               return 0;
+                       if (PageHWPoison(hpage)) {
+                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                   || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       unlock_page(hpage);
+                                       return 0;
+                               }
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
index 175273f38cb1bd59f5aeb88cb8c815033475dfe8..44ebd5c2cd4aef0f86bd6475132cc40c501c6fef 100644 (file)
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
+       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 733ec283ed1b9e85f9181f67116052f88bb49951..019efb79708f81976bc6484cc371a8fa6c0c080e 100644 (file)
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
        }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-       int subclass = 0;
-
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-
-       return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
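
The nest_level mechanism added in this file replaces the per-call subclass computation of the deleted vlan_calculate_locking_subclass(): the stacking depth is computed once at registration (see the register_vlan_dev hunk above) and exposed through ndo_get_lock_subclass, so the generic dev_mc_sync()/dev_uc_sync() paths can annotate nested addr_list_lock acquisitions correctly. The annotation matters because taking the same lock class at two stacking levels (vlan on vlan) is deadlock-free at runtime but looks like lock recursion to lockdep unless each level gets its own subclass. A hedged sketch with illustrative names:

    #include <linux/spinlock.h>

    /* sync addresses across one level of a stacked-device chain */
    static void sync_stacked_addr_lists(spinlock_t *lower_lock,
    				    spinlock_t *upper_lock,
    				    int nest_level)
    {
    	spin_lock(lower_lock);
    	/* same lock class, one stacking level deeper: annotate it */
    	spin_lock_nested(upper_lock, nest_level);
    	/* ... __hw_addr_sync() between the two address lists ... */
    	spin_unlock(upper_lock);
    	spin_unlock(lower_lock);
    }
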
index b3bd4ec3fd9452f0d1f9a99dd4782260ab65c818..f04224c32005aa9a732622805915fe9aead9ee3e 100644 (file)
@@ -1545,6 +1545,8 @@ out_neigh:
        if ((orig_neigh_node) && (!is_single_hop_neigh))
                batadv_orig_node_free_ref(orig_neigh_node);
 out:
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_free_ref(router_ifinfo);
        if (router)
                batadv_neigh_node_free_ref(router);
        if (router_router)
index b25fd64d727b0d6e8227671f860b133095df5100..aa5d4946d0d784d32fdcdce217c4f2bb482502f0 100644 (file)
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
index bcc4bea632fa69ead6567e016f2f265840f7968b..f14e54a0569178e8b423b4921c4b610e4c63039c 100644 (file)
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_neigh_node *neigh_node)
 {
        struct batadv_priv *bat_priv;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
        unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned header_size = sizeof(frag_header);
        unsigned max_fragment_size, max_packet_size;
+       bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-       return true;
+       ret = true;
+
 out_err:
-       return false;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+
+       return ret;
 }
index c835e137423bb9ec70b98b5130d5a33c12f9b139..90cff585b37d5a3779cdb8f7b3b90a26eb2df88e 100644 (file)
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-       if (atomic_dec_and_test(&gw_node->refcount))
+       if (atomic_dec_and_test(&gw_node->refcount)) {
+               batadv_orig_node_free_ref(gw_node->orig_node);
                kfree_rcu(gw_node, rcu);
+       }
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               return;
+
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node)
+       if (!gw_node) {
+               batadv_orig_node_free_ref(orig_node);
                return;
+       }
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
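
The two gateway hunks above close a use-after-free: gw_node stored a pointer to orig_node without holding a reference, so the originator could be freed underneath it. The fix is the standard refcounting discipline, shown in a self-contained hedged sketch below (the structures and helpers are invented for illustration): pin the object with atomic_inc_not_zero(), which fails once the refcount has reached zero, drop the reference on every error path, and release it again when the holder itself is freed.

    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct obj {
    	atomic_t refcount;
    };

    struct holder {
    	struct obj *obj;
    };

    static void obj_free_ref(struct obj *obj)
    {
    	if (atomic_dec_and_test(&obj->refcount))
    		kfree(obj);
    }

    static struct holder *holder_create(struct obj *obj)
    {
    	struct holder *h;

    	/* refuse to pin an object that is already dying */
    	if (!atomic_inc_not_zero(&obj->refcount))
    		return NULL;

    	h = kzalloc(sizeof(*h), GFP_ATOMIC);
    	if (!h) {
    		obj_free_ref(obj);	/* undo the reference we took */
    		return NULL;
    	}
    	h->obj = obj;
    	return h;
    }
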
index b851cc58085330acbab02848fedf3cb01751a060..fbda6b54baffccf798375cb8add49bb179738386 100644 (file)
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == net_dev->ifindex)
+       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
index ffd9dfbd9b0e856e35e2ac6ea594739e8feb614d..6a484514cd3e98b9e0b27a924b4dcb92f2682055 100644 (file)
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_neigh_node *router;
 
        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+       /* this is the last reference to this object */
+       router = rcu_dereference_protected(orig_ifinfo->router, true);
+       if (router)
+               batadv_neigh_node_free_ref_now(router);
        kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
        return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: the neighbor node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+                         struct batadv_neigh_node *neigh)
+{
+       struct batadv_neigh_ifinfo *neigh_ifinfo;
+       struct batadv_hard_iface *if_outgoing;
+       struct hlist_node *node_tmp;
+
+       spin_lock_bh(&neigh->ifinfo_lock);
+
+       /* for all ifinfo objects for this neighbor */
+       hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+                                 &neigh->ifinfo_list, list) {
+               if_outgoing = neigh_ifinfo->if_outgoing;
+
+               /* always keep the default interface */
+               if (if_outgoing == BATADV_IF_DEFAULT)
+                       continue;
+
+               /* don't purge if the interface is not (going) down */
+               if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+                   (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+                   (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+                       continue;
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+                          neigh->addr, if_outgoing->net_dev->name);
+
+               hlist_del_rcu(&neigh_ifinfo->list);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+       }
+
+       spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
+               } else {
+                       /* only necessary when the whole neighbor is not being
+                        * deleted, but some interface it used has been removed.
+                        */
+                       batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
                }
        }
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
-       bool changed;
+       bool changed_ifinfo, changed_neigh;
 
        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
-       changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-       changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+       changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+       changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-       if (!changed)
+       if (!changed_ifinfo && !changed_neigh)
                return false;
 
        /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-       batadv_hardif_free_ref(hard_iface);
+       if (hard_iface)
+               batadv_hardif_free_ref(hard_iface);
        return 0;
 }
 
index 80e1b0f60a30214002684a42b1bab1a02e9d9962..2acf7fa1fec6c2309123dc189ea964f66f230d07 100644 (file)
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index d2c8a06b3a9883b618c55a8cddf0929a4bcd049f..9abc503b19b7dad367b83a8179468ee3e1ae72d5 100644 (file)
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@ -4542,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+{
+       struct netdev_adjacent *upper;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+
+       return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4623,6 +4648,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+
+       return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                    lower neighbour list, RCU
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+{
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+
+       ASSERT_RTNL();
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+
+       if (type_check(dev))
+               max_nest++;
+
+       return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
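
dev_get_nest_level(), added above, recurses through the adjacency lists and returns the deepest chain of devices matching type_check below dev; callers such as the vlan code cache the result at registration time instead of recomputing it on every address-list sync. One level of lower devices is walked with netdev_for_each_lower_dev(), the wrapper around the netdev_lower_get_next() iterator introduced in the same hunk. A hedged usage sketch (the dump function is illustrative):

    static void dump_lower_devs(struct net_device *dev)
    {
    	struct net_device *lower;
    	struct list_head *iter;

    	ASSERT_RTNL();	/* keeps the adjacency list stable */
    	netdev_for_each_lower_dev(dev, lower, iter)
    		netdev_info(dev, "lower dev: %s\n", lower->name);
    }
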
index 8f8a96ef9f3f64ba519fe4c872d46c7b7c680ec9..32d872eec7f5c535221898cdb45ab8f235d0b4bb 100644 (file)
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
-       neigh->nud_state = NUD_PROBE;
-       atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+       neigh->nud_state = NUD_INCOMPLETE;
+       atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
index 81d3a9a084536541867afe9350602c0c73253006..7c8ffd97496175c60d3947019bfd138bb9746938 100644 (file)
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
index 9837bebf93cea9e9a2f909947326b83b3a3356f9..2d8d8fcfa060c51e2f6cbe240aff3d01b3f964cc 100644 (file)
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
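
rtnl_lock_unregistering_all() above is an instance of the open-coded wait-loop idiom: the task registers on the waitqueue with prepare_to_wait() before testing the condition, so a wake_up() firing between the test and schedule() cannot be missed, and it deliberately returns with the rtnl lock still held once no namespace has devices unregistering. A generic hedged sketch of the idiom (my_wq, my_lock and condition_holds() are placeholders):

    DEFINE_WAIT(wait);

    for (;;) {
    	prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
    	mutex_lock(&my_lock);
    	if (condition_holds())
    		break;			/* leave with my_lock held */
    	mutex_unlock(&my_lock);
    	schedule();			/* really sleep until woken */
    }
    finish_wait(&my_wq, &wait);
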
 
index 1b62343f58378b3d8fc0e3ea048dbb45ce1e3a76..8383b2bddeb923bd629da7d9eac2cdd1ff1d81bd 100644 (file)
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
 
        if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
        __skb_pull(skb, offset);
 
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
index 2f737bf90b3fe4235c75ccca6640c735ecaa076b..eed34338736c275aa02bfa40448801d46dda736b 100644 (file)
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
        struct __net_random_once_work *work =
                container_of(w, struct __net_random_once_work, work);
-       if (!static_key_enabled(work->key))
-               static_key_slow_inc(work->key);
+       BUG_ON(!static_key_enabled(work->key));
+       static_key_slow_dec(work->key);
        kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-                          struct static_key *done_key)
+                          struct static_key *once_key)
 {
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
        *done = true;
        spin_unlock_irqrestore(&lock, flags);
 
-       __net_random_once_disable_jump(done_key);
+       __net_random_once_disable_jump(once_key);
 
        return true;
 }
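
The utils.c change fixes the deferred work behind net_get_random_once(): once the first caller has seeded the buffer, the static key has to be decremented so the branch is patched over to the fast path, rather than incremented a second time. Typical hedged usage, with an illustrative secret and hash:

    #include <linux/jhash.h>
    #include <linux/net.h>

    static u32 hash_secret __read_mostly;

    static u32 my_hash(const void *data, unsigned int len)
    {
    	/* seeds hash_secret exactly once, system-wide */
    	net_get_random_once(&hash_secret, sizeof(hash_secret));
    	return jhash(data, len, hash_secret);
    }
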
index 0eb5d5e76dfbe1f99537e8a561f1671389c09e16..5db37cef50a9ccd80c642118f54dd4ecf04219b8 100644 (file)
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
                goto out_free;
        }
 
-       chip_index = 0;
+       chip_index = -1;
        for_each_available_child_of_node(np, child) {
+               chip_index++;
                cd = &pd->chip[chip_index];
 
                cd->mii_bus = &mdio_bus->dev;
index 8c54870db792cab059bff464d29776510ec3e5ec..6d6dd345bc4d89dea7dac7b179e4ef0f391b3954 100644 (file)
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
index 8a043f03c88ecbb418b5466953abefd54c50b1d0..b10cd43a4722730205272d7822d699bc49ea71d3 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 0d1e2cb877ec43692c5a7b4fe57e16cf921a8c97..a56b8e6e866a8c4327f86111adac947e9ffc2445 100644 (file)
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
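
The sysctl_local_ports to ip_local_ports rename across these files goes hand in hand with moving the seqlock initialization into the new inet_init_net() (af_inet.c above), closing a window in which the lock could be read before ipv4_sysctl_init_net() had initialized it. The function above is the canonical seqlock read loop; paired with its writer, the scheme looks roughly like this (names are illustrative):

    #include <linux/seqlock.h>

    static DEFINE_SEQLOCK(range_lock);
    static int range[2] = { 32768, 61000 };

    static void range_write(int lo, int hi)
    {
    	write_seqlock(&range_lock);
    	range[0] = lo;
    	range[1] = hi;
    	write_sequnlock(&range_lock);
    }

    static void range_read(int *lo, int *hi)
    {
    	unsigned int seq;

    	do {
    		seq = read_seqbegin(&range_lock);
    		*lo = range[0];
    		*hi = range[1];
    		/* retry if a writer raced with us */
    	} while (read_seqretry(&range_lock, seq));
    }
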
 
index be8abe73bb9f464a2e68679255acde3b708ce84b..6f111e48e11c15a19ffd60d345b1399fd3c66953 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce5cbff0fc0bd0f23ac72188fd9e39fa83f..ed32313e307c43202a4710c6f5b74e14c19a4c20 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 1cbeba5edff90fa1ac891d4dd23cfb65464878a4..a52f50187b5495a1157c2ef8e0785da730414022 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath -  GSO segment length is exceeding the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
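
ip_finish_output_gso() relocates the software-segmentation slow path that this series removes from ip_forward.c, so it now also covers GSO skbs injected by virtio-net. Its segment walk is the standard way to consume the list returned by skb_gso_segment(): detach each segment before passing it down, keep the first error, and free the original skb exactly once. A condensed hedged restatement, where output_one() stands in for ip_fragment()/dst_output():

    segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
    if (IS_ERR(segs)) {
    	kfree_skb(skb);		/* segmentation failed: drop the original */
    	return -ENOMEM;
    }
    consume_skb(skb);		/* the original is fully replaced by segs */

    while (segs) {
    	struct sk_buff *next = segs->next;

    	segs->next = NULL;	/* each segment travels on its own */
    	err = output_one(segs);
    	if (err && ret == 0)
    		ret = err;	/* remember the first failure */
    	segs = next;
    }
    return ret;
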
index b3f859731c60eccfdd77517989cff5ba84f6581a..2acc2337d38bfe7f35517e13952cfa8a306c24fd 100644 (file)
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
 
        dst = tnl_params->daddr;
        if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
index afcee51b90ede30a846bbd7a5c16b6996d955889..13ef00f1e17b88943ddee9ae9ba52b7d0efe7832 100644 (file)
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
        if (!tunnel)
                return -1;
 
+       mark = be32_to_cpu(tunnel->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
                return 0;
        }
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;
index 12e13bd82b5bba4fdd183d5ba2cda098a1c0c683..f40f321b41fc2e30b21019333efdc5757404fe0a 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->local_df = 1;
+       }
 
        return err;
 }
index 8210964a9f19bedf17d6f3266c1fd0775f3de144..044a0ddf6a791ace04fbb1802e64563bf3fc5518 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index db1e0da871f40a2284d67bd48c0f21d772b923f3..5e676be3daeb19e5f48df5e767a66b83ac73b432 100644 (file)
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
index 44eba052b43d3ab49ba7630bcd82e73e5b094472..5cde8f263d40c0eb0204d310fee99146dabb7a87 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
index 40e701f2e1e0324af6f0af781ac6715866ad88d3..186a8ecf92fa84bda9d0f6b050131da603c7694a 100644 (file)
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+       return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
-
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index 7f7b243e8139defccf14165971a92a7261c29b71..a2ce0101eaac846b1e36e69a5a89c804ca247c44 100644 (file)
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+       if (!head)
+               return 0;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
                        return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+       if (!head)
+               goto out;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                        return ret;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
        struct xfrm4_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 59f95affceb0773d052184bdf5fec9f276433d63..b2f091566f88453bce5eb3c33b47e1c9c040447c 100644 (file)
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        unsigned int off;
        u16 flush = 1;
        int proto;
-       __wsum csum;
 
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        NAPI_GRO_CB(skb)->flush |= flush;
 
-       csum = skb->csum;
-       skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+       skb_gro_postpull_rcsum(skb, iph, nlen);
 
        pp = ops->callbacks.gro_receive(head, skb);
 
-       skb->csum = csum;
-
 out_unlock:
        rcu_read_unlock();
 
index 40e7581374f7006c6f8c436ed686919ac93c2b19..fbf11562b54c1a7c8809f33cc969c66319517377 100644 (file)
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + local_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->local_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                unsigned int maxnonfragsize, headersize;
 
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
index b05b609f69d1cd3e58bd525cb0b5e8b11d429b80..f6a66bb4114db3af0f23ee1f950acf8b9720304d 100644 (file)
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        u8 proto;
 
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
 
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
index b7c0f827140b402685cc29049cb56646471c2cf2..6cc9f9371cc57cd6b0233815a73134dddfa6207c 100644 (file)
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!t)
                return -1;
 
+       mark = be32_to_cpu(t->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
            type != NDISC_REDIRECT)
                return 0;
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
index 09a22f4f36c9e069c6dfb3074909691ef2c82399..ca8d4ea48a5d9fa641bf129a6fc5e3b428799fa4 100644 (file)
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
        struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-       const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
        u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                        /*
                         * Change: router to host
                         */
-                       struct rt6_info *rt;
-                       rt = rt6_get_dflt_router(saddr, dev);
-                       if (rt)
-                               ip6_del_rt(rt);
+                       rt6_clean_tohost(dev_net(dev),  saddr);
                }
 
 out:
index 95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f..d38e6a8d8b9fb82ec7d583a5ab2abc652838d470 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
index 004fffb6c2218a12a9619eb08f359a8f2b391297..6ebdb7b6744cc933801dcf4c6b3dea6ddce8a89c 100644 (file)
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER          (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY      (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index 0d78132ff18aa018fa4e9918dbfb0dd57f95147a..8517d3cd1aed460bbfb1bfb0f515924f008b790d 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;
 
-       wsum = skb->csum;
+       wsum = NAPI_GRO_CB(skb)->csum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
index 19ef329bdbf8e7418fa1d352bb6c90218935831e..b930d080c66f231f338c7ea860c8c1d798d91fea 100644 (file)
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-       skb->protocol = htons(ETH_P_IPV6);
        skb->local_df = 1;
 
        return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
        struct xfrm_state *x = dst->xfrm;
        int mtu;
 
+#ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+#endif
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
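
The xfrm6_output change above relies on NF_HOOK_COND: the netfilter hook chain runs only when the condition holds, otherwise the packet goes straight to the output function. A minimal sketch of that conditional-hook shape in plain C (names are illustrative, not the kernel API):

    /* Userspace sketch of NF_HOOK_COND semantics; not kernel code. */
    #include <stdio.h>

    struct pkt { int flags; };
    #define REROUTED 0x1

    static int deliver(struct pkt *p)  /* stands in for __xfrm6_output */
    {
            printf("deliver (flags=0x%x)\n", p->flags);
            return 0;
    }

    static int run_hooks(struct pkt *p, int (*okfn)(struct pkt *))
    {
            printf("netfilter hooks\n");  /* POST_ROUTING chain would run here */
            return okfn(p);
    }

    /* Run the hooks only when 'cond' holds, else call okfn directly. */
    static int hook_cond(struct pkt *p, int (*okfn)(struct pkt *), int cond)
    {
            return cond ? run_hooks(p, okfn) : okfn(p);
    }

    int main(void)
    {
            struct pkt a = { 0 }, b = { REROUTED };

            hook_cond(&a, deliver, !(a.flags & REROUTED)); /* hooks + deliver */
            hook_cond(&b, deliver, !(b.flags & REROUTED)); /* deliver only */
            return 0;
    }
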
index 6ab989c486f7ee66c5cd6235ce4f758fe9f277a6..54d13f8dbbae10670756eee0b16b898423d06060 100644 (file)
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm6_protocol *handler;
+       struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+       if (!head)
+               return 0;
 
        for_each_protocol_rcu(*proto_handlers(protocol), handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
        struct xfrm6_protocol __rcu **pprev;
        struct xfrm6_protocol *t;
        bool add_netproto = false;
-
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
        struct xfrm6_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
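
Both hunks above guard a proto_handlers()-style lookup that returns NULL for protocols it does not carry. A minimal userspace sketch of the lookup-then-check pattern (stand-in types; only the IPPROTO numbers are real):

    /* Userspace sketch: NULL-returning protocol lookup must be checked. */
    #include <stdio.h>
    #include <stddef.h>

    #define IPPROTO_ESP  50
    #define IPPROTO_AH   51
    #define IPPROTO_COMP 108

    static int esp_handlers, ah_handlers, comp_handlers;  /* stand-ins for lists */

    static int *proto_handlers(unsigned char protocol)
    {
            switch (protocol) {
            case IPPROTO_ESP:  return &esp_handlers;
            case IPPROTO_AH:   return &ah_handlers;
            case IPPROTO_COMP: return &comp_handlers;
            default:           return NULL;  /* unsupported protocol */
            }
    }

    int main(void)
    {
            int *head = proto_handlers(6 /* IPPROTO_TCP */);

            if (!head) {
                    printf("unsupported, bail out early\n");
                    return 0;
            }
            return 0;
    }
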
index 01e77b0ae0755d037093e7597a42db2a66378a51..8c9d7302c84682f4eec438405cf312da02cf9aab 100644 (file)
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
index 222c28b75315f1ab43226e08566a5f911c6bacc7..f169b6ee94ee8d6ee9c6daa5dc047c5f8b1d2740 100644 (file)
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
 
        unsigned long hw_start_time;
 
index dee50aefd6e868e247ba869e9e9883d4640330e3..27600a9808baeaa31be82ecb0f4218225b28665e 100644 (file)
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_lock(sdata);
 
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
+
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
index 6fb38558a5e6c79d81fc6ba4a4b03ff4313a701a..7a17decd27f91af8646da20b9ab75fc3e303e3c4 100644 (file)
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                container_of(work, struct ieee80211_roc_work, work.work);
        struct ieee80211_sub_if_data *sdata = roc->sdata;
        struct ieee80211_local *local = sdata->local;
-       bool started;
+       bool started, on_channel;
 
        mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
        if (!roc->started) {
                struct ieee80211_roc_work *dep;
 
-               /* start this ROC */
-               ieee80211_offchannel_stop_vifs(local);
+               WARN_ON(local->use_chanctx);
+
+               /* If actually operating on the desired channel (with at least
+                * 20 MHz channel width), don't stop all the operations but still
+                * treat it as though the ROC operation started properly, so
+                * other ROC operations won't interfere with this one.
+                */
+               roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-               /* switch channel etc */
+               /* start this ROC */
                ieee80211_recalc_idle(local);
 
-               local->tmp_channel = roc->chan;
-               ieee80211_hw_config(local, 0);
+               if (!roc->on_channel) {
+                       ieee80211_offchannel_stop_vifs(local);
+
+                       local->tmp_channel = roc->chan;
+                       ieee80211_hw_config(local, 0);
+               }
 
                /* tell userspace or send frame */
                ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
                list_del(&roc->list);
                started = roc->started;
+               on_channel = roc->on_channel;
                ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-               if (started) {
+               if (started && !on_channel) {
                        ieee80211_flush_queues(local, NULL);
 
                        local->tmp_channel = NULL;
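
The offchannel fix above lets a ROC request be served in place when it names the current operating channel at a regular (at least 20 MHz) width. A minimal sketch of that predicate in plain C (types and names are illustrative):

    /* Userspace sketch of the on_channel decision; not kernel code. */
    #include <stdio.h>

    enum chan_width { WIDTH_5, WIDTH_10, WIDTH_20, WIDTH_40 };

    struct chandef { int chan; enum chan_width width; };

    static int roc_on_channel(int roc_chan, const struct chandef *oper)
    {
            return roc_chan == oper->chan &&
                   oper->width != WIDTH_5 &&
                   oper->width != WIDTH_10;
    }

    int main(void)
    {
            struct chandef oper = { 36, WIDTH_40 };

            printf("%d\n", roc_on_channel(36, &oper)); /* 1: stay on channel */
            printf("%d\n", roc_on_channel(40, &oper)); /* 0: must go off-channel */
            return 0;
    }
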
index 216c45b949e513382447050eb560098a5edaa4b3..2b608b2b70ece8a6ed0cb8f8d8eed5b1d44e1536 100644 (file)
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
index 137a192e64bc3c2aa61cc9c5912a89bd3008cbe3..847d92f6bef60856be53ad13f0534695e4c6dac7 100644 (file)
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b02ab2ab79c01d58dc5ba25785993f8dd6..60cb7a665976e10e7a909a9545b7643cf34e67a4 100644 (file)
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
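
The radiotap fix above replaces an if/else-if with two independent ifs, because RTS and CTS protection are separate rate-control bits that can be set together. A minimal userspace sketch of the corrected mapping (flag values are illustrative):

    /* Userspace sketch: independent bits need independent tests. */
    #include <stdio.h>

    #define RC_USE_RTS_CTS     0x1
    #define RC_USE_CTS_PROTECT 0x2
    #define RADIOTAP_TX_CTS    0x4
    #define RADIOTAP_TX_RTS    0x8

    static unsigned int map_txflags(unsigned int rcflags)
    {
            unsigned int txflags = 0;

            if (rcflags & RC_USE_CTS_PROTECT)
                    txflags |= RADIOTAP_TX_CTS;
            if (rcflags & RC_USE_RTS_CTS)  /* no 'else': both may be set */
                    txflags |= RADIOTAP_TX_RTS;
            return txflags;
    }

    int main(void)
    {
            printf("0x%x\n", map_txflags(RC_USE_RTS_CTS | RC_USE_CTS_PROTECT)); /* 0xc */
            return 0;
    }
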
index a0b0aea76525c341711c129519a1c3f89a704bb9..cec5b60487a4032e2b805df374616ea9ad038669 100644 (file)
 
 #define VIF_ENTRY      __field(enum nl80211_iftype, vif_type) __field(void *, sdata)   \
                        __field(bool, p2p)                                              \
-                       __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __string(vif_name, sdata->name)
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+                       __assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
index 275c94f995f7c8401749cbbafb249bb52a418be7..3c365837e910edce60ac2348b0002361c4fca6ea 100644 (file)
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
index e9e36a256165842ac112e35e612ba79c8f86d305..9265adfdabfcf99acbdf1598067884188c500a49 100644 (file)
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* A VHT STA must support 40 MHz */
-       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return;
+       /*
+        * A VHT STA must support 40 MHz, but if we verify that here
+        * then we break a few things - some APs (e.g. Netgear R6300v2
+        * and others based on the BCM4360 chipset) will unset this
+        * capability bit when operating in 20 MHz.
+        */
 
        vht_cap->vht_supported = true;
 
index ccc46fa5edbce5e52710a22ae502e49a0f59e0a5..58579634427d2fcbf7f35556424a959697b8655e 100644 (file)
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 804105391b9a903354ae9517d602c8c4638a8879..345acfb1720b14f00aae0e5937ab07bfb90e9482 100644 (file)
@@ -66,20 +66,6 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-               struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-       struct nft_stats __percpu *stats;
-       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-       rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(chain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
-       rcu_read_unlock_bh();
-}
-
 enum nft_trace {
        NFT_TRACE_RULE,
        NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-       const struct nft_chain *chain = ops->priv;
+       const struct nft_chain *chain = ops->priv, *basechain = chain;
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       int rulenum = 0;
+       struct nft_stats __percpu *stats;
+       int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
         * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+       rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
                switch (data[NFT_REG_VERDICT].verdict) {
                case NFT_BREAK:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-                       /* fall through */
+                       continue;
                case NFT_CONTINUE:
+                       if (unlikely(pkt->skb->nf_trace))
+                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -183,37 +173,44 @@ next_rule:
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               /* fall through */
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
        case NFT_GOTO:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
        case NFT_RETURN:
                if (unlikely(pkt->skb->nf_trace))
                        nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-               /* fall through */
+               break;
        case NFT_CONTINUE:
+               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
        }
 
        if (stackptr > 0) {
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
                stackptr--;
                chain = jumpstack[stackptr].chain;
                rule  = jumpstack[stackptr].rule;
                rulenum = jumpstack[stackptr].rulenum;
                goto next_rule;
        }
-       nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
        if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(basechain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
 
-       return nft_base_chain(chain)->policy;
+       return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
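
The nft_do_chain rework above keeps an explicit jump stack: NFT_JUMP pushes the return point and descends, NFT_GOTO descends without pushing, NFT_RETURN pops, and rulenum now restarts at 0 on every chain entry. A compact userspace model of that traversal (illustrative only, not kernel code):

    /* Userspace model of jump-stack chain traversal. */
    #include <stdio.h>

    enum verdict { V_CONTINUE, V_JUMP, V_GOTO, V_RETURN };

    struct rule  { enum verdict verdict; int target; /* chain index */ };
    struct chain { const struct rule *rules; int nrules; const char *name; };

    #define JUMP_STACK_SIZE 16

    struct frame { int chain; int rulenum; };

    static void run(const struct chain *chains, int entry)
    {
            struct frame stack[JUMP_STACK_SIZE];
            int sp = 0, c = entry, rulenum;

    do_chain:
            rulenum = 0;                        /* reset on every chain entry */
    next_rule:
            for (; rulenum < chains[c].nrules; rulenum++) {
                    const struct rule *r = &chains[c].rules[rulenum];

                    printf("%s rule %d\n", chains[c].name, rulenum);
                    switch (r->verdict) {
                    case V_CONTINUE:
                            continue;
                    case V_JUMP:
                            stack[sp].chain = c;        /* push return point */
                            stack[sp].rulenum = rulenum + 1;
                            sp++;
                            c = r->target;
                            goto do_chain;
                    case V_GOTO:                        /* descend, no push */
                            c = r->target;
                            goto do_chain;
                    case V_RETURN:
                            goto pop;
                    }
            }
    pop:
            if (sp > 0) {                       /* resume in the caller chain */
                    sp--;
                    c = stack[sp].chain;
                    rulenum = stack[sp].rulenum;
                    goto next_rule;
            }
            printf("apply %s chain policy\n", chains[entry].name);
    }

    int main(void)
    {
            static const struct rule base_rules[] = { { V_JUMP, 1 }, { V_CONTINUE, 0 } };
            static const struct rule sub_rules[]  = { { V_RETURN, 0 } };
            static const struct chain chains[] = {
                    { base_rules, 2, "base" },
                    { sub_rules,  1, "sub"  },
            };

            run(chains, 0);
            return 0;
    }
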
 
index e009087620e30ecdae7fe1bda155c0d905d9f6c0..23ef77c60fffc60a1678f79dfbb769fe9be6a274 100644 (file)
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
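
The nfnetlink hunks above reorder netlink_ack() before kfree_skb() because the two skb pointers can alias the same buffer in the batch path, so acking after the free is a use-after-free. A tiny userspace sketch of the aliasing hazard (names are illustrative):

    /* Userspace sketch: use an aliased buffer before freeing it. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void ack(const char *buf)
    {
            printf("ack: %s\n", buf);
    }

    int main(void)
    {
            char *nskb = strdup("message");
            char *skb = nskb;  /* alias, like skb = nskb in the batch path */

            /* Wrong order (use-after-free): free(nskb); ack(skb); */
            ack(skb);          /* use first... */
            free(nskb);        /* ...then free */
            return 0;
    }
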
index 7633a752c65e99189c3e7603e2ebdd6e7438b356..0ad080790a32a341a1ddc57d632302563964a247 100644 (file)
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
        _debug("tktlen: %x", tktlen);
        if (tktlen > AFSTOKEN_RK_TIX_MAX)
                return -EKEYREJECTED;
-       if (8 * 4 + tktlen != toklen)
+       if (toklen < 8 * 4 + tktlen)
                return -EKEYREJECTED;
 
        plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
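
The rxkad fix above relaxes an exact-length check to a minimum-length check, since a token may legitimately carry trailing XDR padding after the ticket. A minimal sketch of the corrected bound (illustrative, outside the kernel):

    /* Userspace sketch: reject only when the ticket cannot fit. */
    #include <stdio.h>
    #include <stddef.h>

    static int check(size_t toklen, size_t tktlen)
    {
            if (toklen < 8 * 4 + tktlen)  /* header words + ticket must fit */
                    return -1;            /* -EKEYREJECTED in the kernel */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check(40, 8)); /*  0: exact fit */
            printf("%d\n", check(44, 8)); /*  0: fits with trailing padding */
            printf("%d\n", check(36, 8)); /* -1: too short */
            return 0;
    }
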
index eed8404443d8f0145942c3e459b79934ada9c48d..f435a88d899afff7c132085ccece5427461e5490 100644 (file)
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                return err;
 
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
 
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
 
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
+
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr.res, base);
        }
 
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
 
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
 
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
 
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
index 7d09a712cb1f1353f13310f5c68b38e750d199a6..88f108edfb586ef3b2d17d15cf66b00da84f93f0 100644 (file)
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+       ASSERT_RTNL();
+
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
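
The cfg80211 split above is the usual locked/unlocked API pattern: the plain entry point takes the lock and calls the _rtnl variant, which asserts the lock is already held so lock-holding callers can use it directly. A minimal pthreads sketch of the same shape (names are illustrative; pthreads stand in for the RTNL):

    /* Userspace sketch of the locked/unlocked API split. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static int big_lock_held;

    static void stop_scan_locked(void)
    {
            assert(big_lock_held);  /* like ASSERT_RTNL() */
            printf("scan stopped\n");
    }

    static void stop_scan(void)
    {
            pthread_mutex_lock(&big_lock);
            big_lock_held = 1;
            stop_scan_locked();
            big_lock_held = 0;
            pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
            stop_scan();  /* callers without the lock use this entry point */
            return 0;
    }
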
index acdcb4a81817b7c78e8e721ff632284b9b806fa9..3546a77033de30d0d2593c6ac4fbac8f58659025 100644 (file)
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
index bcae806b0c398d5450975ab08001e7a808e376c4..9a617adc6675dc06552de428c93b3c611599900b 100644 (file)
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
        $(call descend,$@)
 
+liblockdep: FORCE
+       $(call descend,lib/lockdep)
+
 libapikfs: FORCE
        $(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
        $(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+       $(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
        $(call descend,lib/api,clean)
 
index cb09d3ff8f5856dab489130133ec9c2e2ba73db8..bba2f5253b6e281ff85abdf65a76406d88d6503b 100644 (file)
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+       $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile