Merge tag 'mfd-fixes-4.9.1' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Nov 2016 19:36:35 +0000 (11:36 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Nov 2016 19:36:35 +0000 (11:36 -0800)
Pull MFD fixes from Lee Jones:
 "Received a copule of last minute fixes for v4.9.

  The patches from Viresh are fixing issues reported by KernelCI"

* tag 'mfd-fixes-4.9.1' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd:
  mfd: wm8994-core: Don't use managed regulator bulk get API
  mfd: wm8994-core: Disable regulators before removing them
  mfd: syscon: Support native-endian regmaps

149 files changed:
arch/sparc/Kconfig
arch/sparc/include/asm/hypervisor.h
arch/sparc/include/asm/iommu_64.h
arch/sparc/kernel/hvapi.c
arch/sparc/kernel/iommu.c
arch/sparc/kernel/iommu_common.h
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/pci_sun4v.h
arch/sparc/kernel/pci_sun4v_asm.S
arch/sparc/kernel/signal_32.c
arch/sparc/mm/init_64.c
arch/tile/kernel/time.c
arch/x86/boot/compressed/Makefile
arch/x86/boot/cpu.c
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/perf_event.h
arch/x86/kernel/dumpstack.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/head_32.S
arch/x86/kernel/sysfb_simplefb.c
arch/x86/kernel/unwind_guess.c
arch/x86/mm/extable.c
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_wdt.c [deleted file]
crypto/algif_hash.c
crypto/scatterwalk.c
drivers/clk/berlin/bg2.c
drivers/clk/berlin/bg2q.c
drivers/clk/clk-efm32gg.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
drivers/clk/sunxi/clk-sunxi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-lg.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sensor-hub.c
drivers/media/tuners/tuner-xc2028.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci.h
drivers/net/dsa/b53/b53_common.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nic_reg.h
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sunbmac.h
drivers/net/ethernet/sun/sunqe.c
drivers/net/ethernet/sun/sunqe.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/vitesse.c
drivers/net/virtio_net.c
drivers/net/wireless/mac80211_hwsim.c
drivers/of/of_mdio.c
drivers/phy/phy-twl4030-usb.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_os.c
drivers/thermal/intel_powerclamp.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/udc.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/storage/transport.c
fs/nfs/callback.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
include/linux/bpf_verifier.h
include/linux/sched.h
include/net/gro_cells.h
include/net/ip_fib.h
include/net/net_namespace.h
init/do_mounts_rd.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/exit.c
kernel/locking/lockdep_internals.h
kernel/sched/auto_group.c
lib/Kconfig.debug
net/batman-adv/hard-interface.c
net/batman-adv/tp_meter.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/tcp_cong.c
net/ipv4/udp.c
net/ipv6/ip6_tunnel.c
net/ipv6/udp.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/vht.c
net/sched/cls_api.c
net/tipc/socket.c
net/unix/af_unix.c
net/wireless/core.h
net/wireless/scan.c
net/wireless/util.c
security/apparmor/domain.c

index b23c76b42d6e8c1508c04e3dbb65b05b24bce7c3..165ecdd24d22dec52108d54d2b31db4a38f55aab 100644 (file)
@@ -43,6 +43,7 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select HAVE_ARCH_HARDENED_USERCOPY
+       select PROVE_LOCKING_SMALL if PROVE_LOCKING
 
 config SPARC32
        def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
+config ARCH_ATU
+       bool
+       default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+       bool
+       default y if ARCH_ATU
+
 config IOMMU_HELPER
        bool
        default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
        def_bool y if SPARC64
 
+config FORCE_MAX_ZONEORDER
+       int "Maximum zone order"
+       default "13"
+       help
+         The kernel memory allocator divides physically contiguous memory
+         blocks into "zones", where each zone is a power of two number of
+         pages.  This option selects the largest power of two that the kernel
+         keeps in the memory allocator.  If you need to allocate very large
+         blocks of physically contiguous memory, then you may need to
+         increase this value.
+
+         This config option is actually maximum order plus one. For example,
+         a value of 13 means that the largest free memory block is 2^12 pages.
+
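
The default of 13 matters for the ATU support added below; a sketch of the
arithmetic, assuming sparc64's 8K base pages:

    /*
     * Largest allocatable block with FORCE_MAX_ZONEORDER = 13:
     *   2^(13 - 1) pages = 4096 pages * 8 KB = 32 MB,
     * which matches the 32 MB IOTSB table that
     * pci_sun4v_atu_alloc_iotsb() below obtains via __get_free_pages().
     */
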
 source "mm/Kconfig"
 
 if SPARC64
index 666d5ba230d2f83f0659000e85ade4f4c025f319..73cb8978df58c9f25e2f06101a84bf1101dc7cf3 100644 (file)
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
  */
 #define HV_FAST_PCI_MSG_SETVALID       0xd3
 
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above remain valid, IOMMU v2 adds new PCI IO
+ * definitions and services.
+ *
+ *     CTE             Clump Table Entry. First level table entry in the ATU.
+ *
+ *     pci_device_list
+ *                     A 32-bit aligned list of pci_devices.
+ *
+ *     pci_device_listp
+ *                     real address of a pci_device_list. 32-bit aligned.
+ *
+ *     iotte           IOMMU translation table entry.
+ *
+ *     iotte_attributes
+ *                     IO Attributes for IOMMU v2 mappings. In addition to
+ *                     read and write, IOMMU v2 supports relaxed ordering.
+ *
+ *     io_page_list    A 64-bit aligned list of real addresses. Each real
+ *                     address in an io_page_list must be properly aligned
+ *                     to the pagesize of the given IOTSB.
+ *
+ *     io_page_list_p  Real address of an io_page_list, 64-bit aligned.
+ *
+ *     IOTSB           IO Translation Storage Buffer. An aligned table of
+ *                     IOTTEs. Each IOTSB has a pagesize, table size, and
+ *                     virtual address associated with it that must match
+ *                     a pagesize and table size supported by the underlying
+ *                     hardware implementation. The alignment requirements
+ *                     for an IOTSB depend on the pagesize used for that IOTSB.
+ *                     Each IOTTE in an IOTSB maps one pagesize-sized page.
+ *                     The size of the IOTSB dictates how large of a virtual
+ *                     address space the IOTSB is capable of mapping.
+ *
+ *     iotsb_handle    An opaque identifier for an IOTSB. A devhandle plus
+ *                     iotsb_handle represents a binding of an IOTSB to a
+ *                     PCI root complex.
+ *
+ *     iotsb_index     Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+       (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
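For illustration, packing 8 IOTTEs starting at IOTSB index 0x100 (arbitrary
example values):

    /*
     * HV_PCI_IOTSB_INDEX_COUNT(8, 0x100)
     *   = ((u64)8 << 48) | 0x100
     *   = 0x0008000000000100
     */
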
+/* pci_iotsb_conf()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_CONF
+ * ARG0:       devhandle
+ * ARG1:       r_addr
+ * ARG2:       size
+ * ARG3:       pagesize
+ * ARG4:       iova
+ * RET0:       status
+ * RET1:       iotsb_handle
+ * ERRORS:     EINVAL          Invalid devhandle, size, iova, or pagesize
+ *             EBADALIGN       r_addr is not properly aligned
+ *             ENORADDR        r_addr is not a valid real address
+ *             ETOOMANY        No further IOTSBs may be configured
+ *             EBUSY           Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * iova is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF         0x190
+
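A minimal sketch of the call sequence, mirroring pci_sun4v_atu_alloc_iotsb()
later in this patch (local variable names are illustrative):

    u64 iotsb_num;
    unsigned long err;

    /* table must be zeroed and aligned for the chosen pagesize */
    err = pci_sun4v_iotsb_conf(devhandle, __pa(table), table_size,
                               IO_PAGE_SIZE, dvma_base, &iotsb_num);
    if (err)        /* non-zero hypervisor status */
            return -ENODEV;
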
+/* pci_iotsb_info()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_INFO
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * RET0:       status
+ * RET1:       r_addr
+ * RET2:       size
+ * RET3:       pagesize
+ * RET4:       iova
+ * RET5:       #bound
+ * ERRORS:     EINVAL  Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1,
+ * which indicates a memory range that cannot be accessed or reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO         0x191
+
+/* pci_iotsb_unconf()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * RET0:       status
+ * ERRORS:     EINVAL  Invalid devhandle or iotsb_handle
+ *             EBUSY   The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail.
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF       0x192
+
+/* pci_iotsb_bind()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_BIND
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       pci_device
+ * RET0:       status
+ * ERRORS:     EINVAL  Invalid devhandle, iotsb_handle, or pci_device
+ *             EBUSY   A PCI function is already bound to an IOTSB at the same
+ *                     address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND         0x193
+
+/* pci_iotsb_unbind()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       pci_device
+ * RET0:       status
+ * ERRORS:     EINVAL  Invalid devhandle, iotsb_handle, or pci_device
+ *             ENOMAP  The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP.
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND       0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       iova
+ * RET0:       status
+ * RET1:       iotsb_handle
+ * ERRORS:     EINVAL  Invalid devhandle, pci_device, or iova
+ *             ENOMAP  The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING  0x195
+
+/* pci_iotsb_map()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_MAP
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       index_count
+ * ARG3:       iotte_attributes
+ * ARG4:       io_page_list_p
+ * RET0:       status
+ * RET1:       #mapped
+ * ERRORS:     EINVAL          Invalid devhandle, iotsb_handle, #iottes,
+ *                             iotsb_index or iotte_attributes
+ *             EBADALIGN       Improperly aligned io_page_list_p or I/O page
+ *                             address in the I/O page list.
+ *             ENORADDR        Invalid io_page_list_p or I/O page address in
+ *                             the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iottes
+ * and bits 47:0 contain iotsb_index.
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP          0x196
+
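Because #mapped may come back short of #iottes, callers must loop. The batch
flush path added to pci_sun4v.c below has exactly this shape:

    while (npages != 0) {
            ret = pci_sun4v_iotsb_map(devhandle, iotsb_num,
                                      HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
                                      prot, __pa(pglist), &num);
            if (unlikely(ret != HV_EOK))
                    return -1;
            entry += num;           /* resume after the IOTTEs just mapped */
            npages -= num;
            pglist += num;
    }
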
+/* pci_iotsb_map_one()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       iotsb_index
+ * ARG3:       iotte_attributes
+ * ARG4:       r_addr
+ * RET0:       status
+ * ERRORS:     EINVAL          Invalid devhandle, iotsb_handle, iotsb_index
+ *                             or iotte_attributes
+ *             EBADALIGN       Improperly aligned r_addr
+ *             ENORADDR        Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by
+ * the arguments devhandle and iotsb_handle.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE      0x197
+
+/* pci_iotsb_demap()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       iotsb_index
+ * ARG3:       #iottes
+ * RET0:       status
+ * RET1:       #unmapped
+ * ERRORS:     EINVAL  Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle and
+ * iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP                0x198
+
+/* pci_iotsb_getmap()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       iotsb_index
+ * RET0:       status
+ * RET1:       r_addr
+ * RET2:       iotte_attributes
+ * ERRORS:     EINVAL  Invalid devhandle, iotsb_handle, or iotsb_index
+ *             ENOMAP  No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP       0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0:       devhandle
+ * ARG1:       iotsb_handle
+ * ARG2:       iotsb_index
+ * ARG3:       #iottes
+ * RET0:       status
+ * RET1:       #synced
+ * ERRORS:     EINVAL  Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * If, upon a successful return, #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS        0x19a
+
 /* Logical Domain Channel services.  */
 
 #define LDC_CHANNEL_DOWN               0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO                    0x0108
 #define HV_GRP_SDIO_ERR                        0x0109
 #define HV_GRP_REBOOT_DATA             0x0110
+#define HV_GRP_ATU                     0x0111
 #define HV_GRP_M7_PERF                 0x0114
 #define HV_GRP_NIAG_PERF               0x0200
 #define HV_GRP_FIRE_PERF               0x0201
index cd0d69fa7592e64d6ac564eafac8ee622ea7ccfc..f24f356f250376e0931a4d4bc5d6bc631f469a74 100644 (file)
@@ -24,8 +24,36 @@ struct iommu_arena {
        unsigned int    limit;
 };
 
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+       void    *table;         /* IOTSB table base virtual addr */
+       u64     ra;             /* IOTSB table real addr */
+       u64     dvma_size;      /* ranges[3].size or OS selected 32G size */
+       u64     dvma_base;      /* ranges[3].base */
+       u64     table_size;     /* IOTSB table size */
+       u64     page_size;      /* IO PAGE size for IOTSB */
+       u32     iotsb_num;      /* tsbnum is same as iotsb_handle */
+};
+
+struct atu_ranges {
+       u64     base;
+       u64     size;
+};
+
+struct atu {
+       struct  atu_ranges      *ranges;
+       struct  atu_iotsb       *iotsb;
+       struct  iommu_map_table tbl;
+       u64                     base;
+       u64                     size;
+       u64                     dma_addr_mask;
+};
+
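Worked numbers for these fields with the fixed 32G window, assuming 8K IO
pages (IO_PAGE_SHIFT = 13):

    /*
     * dvma_size  = 0x800000000 bytes          (32G window)
     * IOTTEs     = 32 GB / 8 KB               = 4,194,304 entries
     * table_size = 4,194,304 * 8 bytes        = 32 MB,
     * matching "(atu->size / IO_PAGE_SIZE) * 8" in
     * pci_sun4v_atu_alloc_iotsb() below.
     */
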
 struct iommu {
        struct iommu_map_table  tbl;
+       struct atu              *atu;
        spinlock_t              lock;
        u32                     dma_addr_mask;
        iopte_t                 *page_table;
index 662500fa555f74160f6449143e6d0785af2643e9..267731234ce8a2ee2cdc03686377c11a2ea6205f 100644 (file)
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
        { .group = HV_GRP_SDIO,                                 },
        { .group = HV_GRP_SDIO_ERR,                             },
        { .group = HV_GRP_REBOOT_DATA,                          },
+       { .group = HV_GRP_ATU,          .flags = FLAG_PRE_API   },
        { .group = HV_GRP_NIAG_PERF,    .flags = FLAG_PRE_API   },
        { .group = HV_GRP_FIRE_PERF,                            },
        { .group = HV_GRP_N2_CPU,                               },
index 5c615abff030fdb6c26a7c68c86d5c3fa0544b94..852a3291db968986a0f77d240ccf7b58c3f9070a 100644 (file)
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
        struct iommu *iommu = dev->archdata.iommu;
        u64 dma_addr_mask = iommu->dma_addr_mask;
 
-       if (device_mask >= (1UL << 32UL))
-               return 0;
+       if (device_mask > DMA_BIT_MASK(32)) {
+               if (iommu->atu)
+                       dma_addr_mask = iommu->atu->dma_addr_mask;
+               else
+                       return 0;
+       }
 
        if ((device_mask & dma_addr_mask) == dma_addr_mask)
                return 1;
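
From a driver's perspective, a 64-bit DMA mask is now accepted on sun4v when
an ATU is present; a sketch using the generic DMA API (not part of this
patch):

    /* In a PCIe driver probe: prefer 64-bit DMA, fall back to 32-bit. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
            return -EIO;
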
index b40cec25290503b72dbf1d8c18e8f374ab51b44e..828493329f68d05e7ab1c47d6449a9f76adedddd 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 
index db57d8acdc01cf52aeb1664acdc35fef77b3a21a..06981cc716b68022712dc3a872e611565e4e4817 100644 (file)
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
        { .major = 1, .minor = 1 },
 };
 
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
 #define PGLIST_NENTS   (PAGE_SIZE / sizeof(u64))
 
 struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
 }
 
 /* Interrupts must be disabled.  */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+       u64 *pglist = p->pglist;
+       u64 index_count;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
-       u64 *pglist = p->pglist;
        unsigned long npages = p->npages;
+       unsigned long iotsb_num;
+       unsigned long ret;
+       long num;
 
        /* VPCI maj=1, min=[0,1] only supports read and write */
        if (vpci_major < 2)
                prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
        while (npages != 0) {
-               long num;
-
-               num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-                                         npages, prot, __pa(pglist));
-               if (unlikely(num < 0)) {
-                       if (printk_ratelimit())
-                               printk("iommu_batch_flush: IOMMU map of "
-                                      "[%08lx:%08llx:%lx:%lx:%lx] failed with "
-                                      "status %ld\n",
-                                      devhandle, HV_PCI_TSBID(0, entry),
-                                      npages, prot, __pa(pglist), num);
-                       return -1;
+               if (mask <= DMA_BIT_MASK(32)) {
+                       num = pci_sun4v_iommu_map(devhandle,
+                                                 HV_PCI_TSBID(0, entry),
+                                                 npages,
+                                                 prot,
+                                                 __pa(pglist));
+                       if (unlikely(num < 0)) {
+                               pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+                                                  __func__,
+                                                  devhandle,
+                                                  HV_PCI_TSBID(0, entry),
+                                                  npages, prot, __pa(pglist),
+                                                  num);
+                               return -1;
+                       }
+               } else {
+                       index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+                       iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+                       ret = pci_sun4v_iotsb_map(devhandle,
+                                                 iotsb_num,
+                                                 index_count,
+                                                 prot,
+                                                 __pa(pglist),
+                                                 &num);
+                       if (unlikely(ret != HV_EOK)) {
+                               pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+                                                  __func__,
+                                                  devhandle, iotsb_num,
+                                                  index_count, prot,
+                                                  __pa(pglist), ret);
+                               return -1;
+                       }
                }
-
                entry += num;
                npages -= num;
                pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
        return 0;
 }
 
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 {
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
-               iommu_batch_flush(p);
+               iommu_batch_flush(p, mask);
        p->entry = entry;
 }
 
 /* Interrupts must be disabled.  */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
 {
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
 
        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
-               return iommu_batch_flush(p);
+               return iommu_batch_flush(p, mask);
 
        return 0;
 }
 
 /* Interrupts must be disabled.  */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
 {
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
        BUG_ON(p->npages >= PGLIST_NENTS);
 
-       return iommu_batch_flush(p);
+       return iommu_batch_flush(p, mask);
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
                                   unsigned long attrs)
 {
+       u64 mask;
        unsigned long flags, order, first_page, npages, n;
        unsigned long prot = 0;
        struct iommu *iommu;
+       struct atu *atu;
+       struct iommu_map_table *tbl;
        struct page *page;
        void *ret;
        long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
        iommu = dev->archdata.iommu;
+       atu = iommu->atu;
+
+       mask = dev->coherent_dma_mask;
+       if (mask <= DMA_BIT_MASK(32))
+               tbl = &iommu->tbl;
+       else
+               tbl = &atu->tbl;
 
-       entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+       entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
 
        if (unlikely(entry == IOMMU_ERROR_CODE))
                goto range_alloc_fail;
 
-       *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+       *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);
 
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                          entry);
 
        for (n = 0; n < npages; n++) {
-               long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+               long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
 
-       if (unlikely(iommu_batch_end() < 0L))
+       if (unlikely(iommu_batch_end(mask) < 0L))
                goto iommu_map_fail;
 
        local_irq_restore(flags);
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        return ret;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+       iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-                              unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+                               unsigned long iotsb_num,
+                               struct pci_bus *bus_dev)
+{
+       struct pci_dev *pdev;
+       unsigned long err;
+       unsigned int bus;
+       unsigned int device;
+       unsigned int fun;
+
+       list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+               if (pdev->subordinate) {
+                       /* No need to bind pci bridge */
+                       dma_4v_iotsb_bind(devhandle, iotsb_num,
+                                         pdev->subordinate);
+               } else {
+                       bus = bus_dev->number;
+                       device = PCI_SLOT(pdev->devfn);
+                       fun = PCI_FUNC(pdev->devfn);
+                       err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+                                                  HV_PCI_DEVICE_BUILD(bus,
+                                                                      device,
+                                                                      fun));
+
+                       /* If bind fails for one device it is going to fail
+                        * for the rest of the devices because we are sharing
+                        * the IOTSB. So in case of failure simply return with
+                        * the error.
+                        */
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
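HV_PCI_DEVICE_BUILD() above is the existing hypervisor.h helper that packs
bus/device/function into the pci_device argument; an arbitrary example BDF:

    /*
     * bus 0x02, device 0x03, function 0:
     *   (0x02 << 16) | (0x03 << 11) | (0x0 << 8) = 0x21800
     */
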
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+                              dma_addr_t dvma, unsigned long iotsb_num,
+                              unsigned long entry, unsigned long npages)
 {
-       u32 devhandle = *(u32 *)demap_arg;
        unsigned long num, flags;
+       unsigned long ret;
 
        local_irq_save(flags);
        do {
-               num = pci_sun4v_iommu_demap(devhandle,
-                                           HV_PCI_TSBID(0, entry),
-                                           npages);
-
+               if (dvma <= DMA_BIT_MASK(32)) {
+                       num = pci_sun4v_iommu_demap(devhandle,
+                                                   HV_PCI_TSBID(0, entry),
+                                                   npages);
+               } else {
+                       ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+                                                   entry, npages, &num);
+                       if (unlikely(ret != HV_EOK)) {
+                               pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+                                                  ret);
+                       }
+               }
                entry += num;
                npages -= num;
        } while (npages != 0);
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
+       struct atu *atu;
+       struct iommu_map_table *tbl;
        unsigned long order, npages, entry;
+       unsigned long iotsb_num;
        u32 devhandle;
 
        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
+       atu = iommu->atu;
        devhandle = pbm->devhandle;
-       entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
-       dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+       if (dvma <= DMA_BIT_MASK(32)) {
+               tbl = &iommu->tbl;
+               iotsb_num = 0; /* not used by the legacy iommu */
+       } else {
+               tbl = &atu->tbl;
+               iotsb_num = atu->iotsb->iotsb_num;
+       }
+       entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+       dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+       iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
                                  unsigned long attrs)
 {
        struct iommu *iommu;
+       struct atu *atu;
+       struct iommu_map_table *tbl;
+       u64 mask;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
-       u32 bus_addr, ret;
        unsigned long prot;
+       dma_addr_t bus_addr, ret;
        long entry;
 
        iommu = dev->archdata.iommu;
+       atu = iommu->atu;
 
        if (unlikely(direction == DMA_NONE))
                goto bad;
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
 
-       entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+       mask = *dev->dma_mask;
+       if (mask <= DMA_BIT_MASK(32))
+               tbl = &iommu->tbl;
+       else
+               tbl = &atu->tbl;
+
+       entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
 
        if (unlikely(entry == IOMMU_ERROR_CODE))
                goto bad;
 
-       bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+       bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        iommu_batch_start(dev, prot, entry);
 
        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-               long err = iommu_batch_add(base_paddr);
+               long err = iommu_batch_add(base_paddr, mask);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
-       if (unlikely(iommu_batch_end() < 0L))
+       if (unlikely(iommu_batch_end(mask) < 0L))
                goto iommu_map_fail;
 
        local_irq_restore(flags);
@@ -310,7 +414,7 @@ bad:
        return DMA_ERROR_CODE;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+       iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
        return DMA_ERROR_CODE;
 }
 
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
+       struct atu *atu;
+       struct iommu_map_table *tbl;
        unsigned long npages;
+       unsigned long iotsb_num;
        long entry;
        u32 devhandle;
 
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
+       atu = iommu->atu;
        devhandle = pbm->devhandle;
 
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;
-       entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
-       dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+       if (bus_addr <= DMA_BIT_MASK(32)) {
+               iotsb_num = 0; /* not used by the legacy iommu */
+               tbl = &iommu->tbl;
+       } else {
+               iotsb_num = atu->iotsb->iotsb_num;
+               tbl = &atu->tbl;
+       }
+       entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+       dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+       iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
+       struct atu *atu;
+       struct iommu_map_table *tbl;
+       u64 mask;
        unsigned long base_shift;
        long err;
 
        BUG_ON(direction == DMA_NONE);
 
        iommu = dev->archdata.iommu;
+       atu = iommu->atu;
+
        if (nelems == 0 || !iommu)
                return 0;
        
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-       base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+       mask = *dev->dma_mask;
+       if (mask <= DMA_BIT_MASK(32))
+               tbl = &iommu->tbl;
+       else
+               tbl = &atu->tbl;
+
+       base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-               entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+               entry = iommu_tbl_range_alloc(dev, tbl, npages,
                                              &handle, (unsigned long)(-1), 0);
 
                /* Handle failure */
                if (unlikely(entry == IOMMU_ERROR_CODE)) {
-                       if (printk_ratelimit())
-                               printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
-                                      " npages %lx\n", iommu, paddr, npages);
+                       pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+                                          tbl, paddr, npages);
                        goto iommu_map_failed;
                }
 
-               iommu_batch_new_entry(entry);
+               iommu_batch_new_entry(entry, mask);
 
                /* Convert entry to a dma_addr_t */
-               dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+               dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
-                       err = iommu_batch_add(paddr);
+                       err = iommu_batch_add(paddr, mask);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                dma_next = dma_addr + slen;
        }
 
-       err = iommu_batch_end();
+       err = iommu_batch_end(mask);
 
        if (unlikely(err < 0L))
                goto iommu_map_failed;
@@ -475,7 +603,7 @@ iommu_map_failed:
                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
-                       iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+                       iommu_tbl_range_free(tbl, vaddr, npages,
                                             IOMMU_ERROR_CODE);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
+       struct atu *atu;
        unsigned long flags, entry;
+       unsigned long iotsb_num;
        u32 devhandle;
 
        BUG_ON(direction == DMA_NONE);
 
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
+       atu = iommu->atu;
        devhandle = pbm->devhandle;
        
        local_irq_save(flags);
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages;
-               struct iommu_map_table *tbl = &iommu->tbl;
+               struct iommu_map_table *tbl;
                unsigned long shift = IO_PAGE_SHIFT;
 
                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+               if (dma_handle <= DMA_BIT_MASK(32)) {
+                       iotsb_num = 0; /* not used by the legacy iommu */
+                       tbl = &iommu->tbl;
+               } else {
+                       iotsb_num = atu->iotsb->iotsb_num;
+                       tbl = &atu->tbl;
+               }
                entry = ((dma_handle - tbl->table_map_base) >> shift);
-               dma_4v_iommu_demap(&devhandle, entry, npages);
-               iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+               dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+                                  entry, npages);
+               iommu_tbl_range_free(tbl, dma_handle, npages,
                                     IOMMU_ERROR_CODE);
                sg = sg_next(sg);
        }
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
        return cnt;
 }
 
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+       struct atu *atu = pbm->iommu->atu;
+       struct atu_iotsb *iotsb;
+       void *table;
+       u64 table_size;
+       u64 iotsb_num;
+       unsigned long order;
+       unsigned long err;
+
+       iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+       if (!iotsb) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+       atu->iotsb = iotsb;
+
+       /* calculate size of IOTSB */
+       table_size = (atu->size / IO_PAGE_SIZE) * 8;
+       order = get_order(table_size);
+       table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!table) {
+               err = -ENOMEM;
+               goto table_failed;
+       }
+       iotsb->table = table;
+       iotsb->ra = __pa(table);
+       iotsb->dvma_size = atu->size;
+       iotsb->dvma_base = atu->base;
+       iotsb->table_size = table_size;
+       iotsb->page_size = IO_PAGE_SIZE;
+
+       /* configure and register IOTSB with HV */
+       err = pci_sun4v_iotsb_conf(pbm->devhandle,
+                                  iotsb->ra,
+                                  iotsb->table_size,
+                                  iotsb->page_size,
+                                  iotsb->dvma_base,
+                                  &iotsb_num);
+       if (err) {
+               pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+               goto iotsb_conf_failed;
+       }
+       iotsb->iotsb_num = iotsb_num;
+
+       err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+       if (err) {
+               pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+               goto iotsb_conf_failed;
+       }
+
+       return 0;
+
+iotsb_conf_failed:
+       free_pages((unsigned long)table, order);
+table_failed:
+       kfree(iotsb);
+out_err:
+       return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+       struct atu *atu = pbm->iommu->atu;
+       unsigned long err;
+       const u64 *ranges;
+       u64 map_size, num_iotte;
+       u64 dma_mask;
+       const u32 *page_size;
+       int len;
+
+       ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+                                &len);
+       if (!ranges) {
+               pr_err(PFX "No iommu-address-ranges\n");
+               return -EINVAL;
+       }
+
+       page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+                                   NULL);
+       if (!page_size) {
+               pr_err(PFX "No iommu-pagesizes\n");
+               return -EINVAL;
+       }
+
+       /* There are 4 iommu-address-ranges supported. Each range is a pair of
+        * {base, size}. ranges[0] and ranges[1] are 32-bit address space
+        * while ranges[2] and ranges[3] are 64-bit space. We want to use the
+        * 64-bit address ranges to support 64-bit addressing. Because 'size'
+        * is the same for ranges[2] and ranges[3], we can select either of
+        * them for mapping. However, because that 'size' is too large for the
+        * OS to allocate an IOTSB for, we use a fixed 32G size
+        * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
+        * to share.
+        */
+       atu->ranges = (struct atu_ranges *)ranges;
+       atu->base = atu->ranges[3].base;
+       atu->size = ATU_64_SPACE_SIZE;
+
+       /* Create IOTSB */
+       err = pci_sun4v_atu_alloc_iotsb(pbm);
+       if (err) {
+               pr_err(PFX "Error creating ATU IOTSB\n");
+               return err;
+       }
+
+       /* Create ATU iommu map.
+        * One bit represents one iotte in IOTSB table.
+        */
+       dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+       num_iotte = atu->size / IO_PAGE_SIZE;
+       map_size = num_iotte / 8;
+       atu->tbl.table_map_base = atu->base;
+       atu->dma_addr_mask = dma_mask;
+       atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+       if (!atu->tbl.map)
+               return -ENOMEM;
+
+       iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+                           NULL, false /* no large_pool */,
+                           0 /* default npools */,
+                           false /* want span boundary checking */);
+
+       return 0;
+}
+
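The map sizing above works out as follows for the 32G window (again assuming
8K IO pages):

    /*
     * dma_mask  = roundup_pow_of_two(0x800000000) - 1 = 0x7ffffffff (35 bits)
     * num_iotte = 0x800000000 / 8192                  = 4,194,304
     * map_size  = 4,194,304 / 8                       = 512 KB bitmap,
     * one bit per IOTTE in the IOTSB.
     */
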
 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
        static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 
        pci_sun4v_scan_bus(pbm, &op->dev);
 
+       /* If atu_init fails it is not a complete failure; we can still
+        * continue using the legacy iommu.
+        */
+       if (pbm->iommu->atu) {
+               err = pci_sun4v_atu_init(pbm);
+               if (err) {
+                       kfree(pbm->iommu->atu);
+                       pbm->iommu->atu = NULL;
+                       pr_err(PFX "ATU init failed, err=%d\n", err);
+               }
+       }
+
        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;
 
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
        struct pci_pbm_info *pbm;
        struct device_node *dp;
        struct iommu *iommu;
+       struct atu *atu;
        u32 devhandle;
        int i, err = -ENODEV;
+       static bool hv_atu = true;
 
        dp = op->dev.of_node;
 
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
                pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
                        vpci_major, vpci_minor);
 
+               err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+               if (err) {
+                       /* don't return an error if we fail to register the
+                        * ATU group, but ATU hcalls won't be available.
+                        */
+                       hv_atu = false;
+                       pr_err(PFX "Could not register hvapi ATU err=%d\n",
+                              err);
+               } else {
+                       pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+                               vatu_major, vatu_minor);
+               }
+
                dma_ops = &sun4v_dma_ops;
        }
 
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
        }
 
        pbm->iommu = iommu;
+       iommu->atu = NULL;
+       if (hv_atu) {
+               atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+               if (!atu)
+                       pr_err(PFX "Could not allocate atu\n");
+               else
+                       iommu->atu = atu;
+       }
 
        err = pci_sun4v_pbm_init(pbm, op, devhandle);
        if (err)
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
        return 0;
 
 out_free_iommu:
+       kfree(iommu->atu);
        kfree(pbm->iommu);
 
 out_free_controller:
index 5642212390b2ea7911cea77b1fcc2f6c133f882b..22603a4e48bf1883d3bb72292ec23e10ee6673b3 100644 (file)
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
                                     unsigned long msinum,
                                     unsigned long valid);
 
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+                                  unsigned long ra,
+                                  unsigned long table_size,
+                                  unsigned long page_size,
+                                  unsigned long dvma_base,
+                                  u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+                                  unsigned long iotsb_num,
+                                  unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+                                 unsigned long iotsb_num,
+                                 unsigned long iotsb_index_iottes,
+                                 unsigned long io_attributes,
+                                 unsigned long io_page_list_pa,
+                                 long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+                                   unsigned long iotsb_num,
+                                   unsigned long iotsb_index,
+                                   unsigned long iottes,
+                                   unsigned long *demapped);
 #endif /* !(_PCI_SUN4V_H) */
index e606d46c68159a9c773dfd571fb60b6659882055..578f09657916305b2aab785e2c0eca683863987a 100644 (file)
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
         mov    %o0, %o0
 ENDPROC(pci_sun4v_msg_setvalid)
 
+       /*
+        * %o0: devhandle
+        * %o1: r_addr
+        * %o2: size
+        * %o3: pagesize
+        * %o4: virt
+        * %o5: &iotsb_num/&iotsb_handle
+        *
+        * returns %o0: status
+        *         %o1: iotsb_num/iotsb_handle
+        */
+ENTRY(pci_sun4v_iotsb_conf)
+       mov     %o5, %g1
+       mov     HV_FAST_PCI_IOTSB_CONF, %o5
+       ta      HV_FAST_TRAP
+       retl
+        stx    %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+       /*
+        * %o0: devhandle
+        * %o1: iotsb_num/iotsb_handle
+        * %o2: pci_device
+        *
+        * returns %o0: status
+        */
+ENTRY(pci_sun4v_iotsb_bind)
+       mov     HV_FAST_PCI_IOTSB_BIND, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+       /*
+        * %o0: devhandle
+        * %o1: iotsb_num/iotsb_handle
+        * %o2: index_count
+        * %o3: iotte_attributes
+        * %o4: io_page_list_p
+        * %o5: &mapped
+        *
+        * returns %o0: status
+        *         %o1: #mapped
+        */
+ENTRY(pci_sun4v_iotsb_map)
+       mov     %o5, %g1
+       mov     HV_FAST_PCI_IOTSB_MAP, %o5
+       ta      HV_FAST_TRAP
+       retl
+        stx    %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+       /*
+        * %o0: devhandle
+        * %o1: iotsb_num/iotsb_handle
+        * %o2: iotsb_index
+        * %o3: #iottes
+        * %o4: &demapped
+        *
+        * returns %o0: status
+        *         %o1: #demapped
+        */
+ENTRY(pci_sun4v_iotsb_demap)
+       mov     HV_FAST_PCI_IOTSB_DEMAP, %o5
+       ta      HV_FAST_TRAP
+       retl
+        stx    %o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
index c3c12efe0bc004053fea6b49c2f34e51e1ffc32f..9c0c8fd0b2922cacd2ad6ad81f3238a707286d69 100644 (file)
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
        sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!invalid_frame_pointer(sf, sizeof(*sf)))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv_and_exit;
 
        if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
        synchronize_user_stack();
        sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-       if (!invalid_frame_pointer(sf, sizeof(*sf)))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv;
 
        if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
index 439784b7b7ac6e5951a6c1c6ab126dddc5fae915..37aa537b3ad841522d9a13c2b4695edb7b092a28 100644 (file)
@@ -802,8 +802,10 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+                                  struct node_mem_mask *pnode_mask);
 
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
 {
        int i;
 
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
        return addr;
 }
 
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
 {
+       static bool search_mdesc = true;
+       static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+       static int last_index;
        int i;
 
        addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
                if ((addr & p->mask) == p->val)
                        return i;
        }
-       /* The following condition has been observed on LDOM guests.*/
-       WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
-               " rule. Some physical memory will be owned by node 0.");
-       return 0;
+       /* The following condition has been observed on LDOM guests because
+        * node_masks only contains the best latency mask and value.
+        * An LDOM guest's mdesc can contain a single latency group that
+        * covers multiple address ranges. Print a warning message only if
+        * the address can be found in neither node_masks nor the mdesc.
+        */
+       if ((search_mdesc) &&
+           ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+               /* find the available node in the mdesc */
+               last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+               numadbg("find_node: latency group for address 0x%lx is %d\n",
+                       addr, last_index);
+               if ((last_index < 0) || (last_index >= num_node_masks)) {
+                       /* WARN_ONCE() and use default group 0 */
+                       WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+                       search_mdesc = false;
+                       last_index = 0;
+               }
+       }
+
+       return last_index;
 }
 
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
        *nid = find_node(start);
        start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
        return numa_latency[from][to];
 }
 
+static int find_numa_node_for_addr(unsigned long pa,
+                                  struct node_mem_mask *pnode_mask)
+{
+       struct mdesc_handle *md = mdesc_grab();
+       u64 node, arc;
+       int i = 0;
+
+       node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+       if (node == MDESC_NODE_NULL)
+               goto out;
+
+       mdesc_for_each_node_by_name(md, node, "group") {
+               mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+                       u64 target = mdesc_arc_target(md, arc);
+                       struct mdesc_mlgroup *m = find_mlgroup(target);
+
+                       if (!m)
+                               continue;
+                       if ((pa & m->mask) == m->match) {
+                               if (pnode_mask) {
+                                       pnode_mask->mask = m->mask;
+                                       pnode_mask->val = m->match;
+                               }
+                               mdesc_release(md);
+                               return i;
+                       }
+               }
+               i++;
+       }
+
+out:
+       mdesc_release(md);
+       return -1;
+}
+
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
        int i;
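
The rewritten find_node() above is essentially a memoized lookup: as long as successive addresses still satisfy the cached mask/value rule kept in last_mem_mask, the costly machine-descriptor walk is skipped. A condensed sketch of that pattern, reusing the names from the hunk (illustration only, not additional patch content):

    /* Fast path: the cached latency-group rule still matches. */
    if ((addr & last_mem_mask.mask) == last_mem_mask.val)
            return last_index;

    /* Slow path: re-walk the mdesc and refresh the cache. */
    last_index = find_numa_node_for_addr(addr, &last_mem_mask);
    return last_index;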
index 178989e6d3e3ae1403ae9dc1f108126fedb6e2c8..ea960d6609177faa86780e6164f26e4d3ef51ac2 100644 (file)
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
  */
 unsigned long long sched_clock(void)
 {
-       return clocksource_cyc2ns(get_cycles(),
-                                 sched_clock_mult, SCHED_CLOCK_SHIFT);
+       return mult_frac(get_cycles(),
+                        sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
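
The move away from clocksource_cyc2ns() matters because the naive form, (cycles * mult) >> shift, overflows 64 bits once the cycle counter exceeds 2^64 / mult. mult_frac() splits the dividend first so no intermediate product grows that large; a sketch of the macro as defined in include/linux/kernel.h (quoted from memory, so treat the exact spelling as an assumption):

    #define mult_frac(x, numer, denom)                          \
    ({                                                          \
            typeof(x) quot = (x) / (denom);                     \
            typeof(x) rem  = (x) % (denom);                     \
            (quot * (numer)) + ((rem * (numer)) / (denom));     \
    })

Because quot and rem are both reduced before the multiplication, the result stays exact and overflow-free whenever the final value fits in the type.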
index 536ccfcc01c673c4e3196a4fe57276cf7d7d53a8..34d9e15857c3ba69681038b3ee032c0efa8126f8 100644 (file)
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
-ifeq ($(CONFIG_RELOCATABLE),y)
-# If kernel is relocatable, build compressed kernel as PIE.
+# Compressed kernel should be built as PIE since it may be loaded at any
+# address by the bootloader.
 ifeq ($(CONFIG_X86_32),y)
 LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
 else
@@ -51,7 +51,6 @@ else
 LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
        && echo "-z noreloc-overflow -pie --no-dynamic-linker")
 endif
-endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y    := mkpiggy
index 26240dde081e82e696b9a828a191b01c7bdaf0a9..4224ede43b4edc3df93b5e669cc091d73b9186b5 100644 (file)
@@ -87,6 +87,12 @@ int validate_cpu(void)
                return -1;
        }
 
+       if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
+           !has_eflag(X86_EFLAGS_ID)) {
+               printf("This kernel requires a CPU with the CPUID instruction.  Build with CONFIG_M486=y to run on this CPU.\n");
+               return -1;
+       }
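
The has_eflag(X86_EFLAGS_ID) test added above uses the classic CPUID-detection trick: bit 21 of EFLAGS can be toggled if and only if the CPU implements the CPUID instruction. A conceptual sketch of the probe (this mirrors what arch/x86/boot/cpuflags.c does; it is not itself part of this patch):

    static bool cpu_has_cpuid(void)
    {
            unsigned long f0, f1;

            asm volatile("pushf\n\t"        /* save original EFLAGS      */
                         "pushf\n\t"
                         "pop %0\n\t"       /* f0 = EFLAGS               */
                         "mov %0,%1\n\t"
                         "xor %2,%1\n\t"    /* flip the ID bit           */
                         "push %1\n\t"
                         "popf\n\t"         /* try to write it back      */
                         "pushf\n\t"
                         "pop %1\n\t"       /* f1 = EFLAGS after write   */
                         "popf"             /* restore original EFLAGS   */
                         : "=&r" (f0), "=&r" (f1)
                         : "ri" (X86_EFLAGS_ID));
            return !!((f0 ^ f1) & X86_EFLAGS_ID);
    }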
+
        if (err_flags) {
                puts("This kernel requires the following features "
                     "not present on the CPU:\n");
index f5f4b3fbbbc2924cbac3fe24d45d949e0997dc8e..afb222b63caeb0217ef34d9b2b193b6b59bd190d 100644 (file)
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
                pr_cont("Fam15h ");
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
                break;
-
+       case 0x17:
+               pr_cont("Fam17h ");
+               /*
+                * In family 17h, there are no event constraints in the PMC hardware.
+                * We fall back to the default amd_get_event_constraints().
+                */
+               break;
        default:
                pr_err("core perfctr but no constraints; unknown hardware!\n");
                return -ENODEV;
index d31735f37ed7d0435b8aca5d2f476e04a249b6e8..9d4bf3ab049ec19f8e8dd3c3f859dc310f92c466 100644 (file)
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
                frame.next_frame     = 0;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, 8))
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
                if (bytes != 0)
                        break;
 
-               if (!valid_user_frame(fp, sizeof(frame)))
-                       break;
-
                perf_callchain_store(entry, cs_base + frame.return_address);
                fp = compat_ptr(ss_base + frame.next_frame);
        }
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
                if (bytes != 0)
                        break;
 
-               if (!valid_user_frame(fp, sizeof(frame)))
-                       break;
-
                perf_callchain_store(entry, frame.return_address);
                fp = (void __user *)frame.next_frame;
        }
index 0319311dbdbb548eef4f5b2431c285deea5dd00c..be202390bbd37b00106864123a647786497ce2cd 100644 (file)
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
        }
 
        /*
-        * We use the interrupt regs as a base because the PEBS record
-        * does not contain a full regs set, specifically it seems to
-        * lack segment descriptors, which get used by things like
-        * user_mode().
+        * We use the interrupt regs as a base because the PEBS record does not
+        * contain a full regs set, specifically it seems to lack segment
+        * descriptors, which get used by things like user_mode().
         *
-        * In the simple case fix up only the IP and BP,SP regs, for
-        * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-        * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+        * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+        *
+        * We must however always use BP,SP from iregs for the unwinder to stay
+        * sane; the record BP,SP can point into thin air when the record is
+        * from a previous PMI context or an (I)RET happened between the record
+        * and PMI.
         */
        *regs = *iregs;
        regs->flags = pebs->flags;
        set_linear_ip(regs, pebs->ip);
-       regs->bp = pebs->bp;
-       regs->sp = pebs->sp;
 
        if (sample_type & PERF_SAMPLE_REGS_INTR) {
                regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
                regs->dx = pebs->dx;
                regs->si = pebs->si;
                regs->di = pebs->di;
-               regs->bp = pebs->bp;
-               regs->sp = pebs->sp;
 
-               regs->flags = pebs->flags;
+               /*
+                * Per the above; only set BP,SP if we don't need callchains.
+                *
+                * XXX: does this make sense?
+                */
+               if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+                       regs->bp = pebs->bp;
+                       regs->sp = pebs->sp;
+               }
+
+               /*
+                * Preserve PERF_EFLAGS_VM from set_linear_ip().
+                */
+               regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
 #ifndef CONFIG_X86_32
                regs->r8 = pebs->r8;
                regs->r9 = pebs->r9;
index efca2685d8767582e624d1c38779e5c96eef33b9..dbaaf7dc8373cb0248a4637abe4a211f266fb2db 100644 (file)
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
  */
 static int uncore_pmu_event_init(struct perf_event *event);
 
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
 {
-       return event->pmu->event_init == uncore_pmu_event_init;
+       return &box->pmu->pmu == event->pmu;
 }
 
 static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 
        n = box->n_events;
 
-       if (is_uncore_event(leader)) {
+       if (is_box_event(box, leader)) {
                box->event_list[n] = leader;
                n++;
        }
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
                return n;
 
        list_for_each_entry(event, &leader->sibling_list, group_entry) {
-               if (!is_uncore_event(event) ||
+               if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 
index 81195cca7eaecb5da2ff7924ea0a0e5d994c27cf..a3dcc12bef4ab3a67aab29c6acf3480920e06952 100644 (file)
@@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
 
        snb_uncore_imc_event_start(event, 0);
 
-       box->n_events++;
-
        return 0;
 }
 
 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
 {
-       struct intel_uncore_box *box = uncore_event_to_box(event);
-       int i;
-
        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
-       for (i = 0; i < box->n_events; i++) {
-               if (event == box->event_list[i]) {
-                       --box->n_events;
-                       break;
-               }
-       }
 }
 
 int snb_pci2phy_map_init(int devid)
index 5874d8de1f8da111e3e0a682835e42b23af875ff..a77ee026643d23fac4808a1f1c32ca42e4999769 100644 (file)
@@ -113,7 +113,7 @@ struct debug_store {
  * Per register state.
  */
 struct er_account {
-       raw_spinlock_t          lock;   /* per-core: protect structure */
+       raw_spinlock_t      lock;       /* per-core: protect structure */
        u64                 config;     /* extra MSR config */
        u64                 reg;        /* extra MSR number */
        atomic_t            ref;        /* reference count */
index 9b7cf5c28f5fa8557d23083047995bd93af1a68a..85f854b98a9d24c3e0e6a3d9d83fd6c5b6c57e3f 100644 (file)
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                for (; stack < stack_info.end; stack++) {
                        unsigned long real_addr;
                        int reliable = 0;
-                       unsigned long addr = *stack;
+                       unsigned long addr = READ_ONCE_NOCHECK(*stack);
                        unsigned long *ret_addr_p =
                                unwind_get_return_address_ptr(&state);
 
index 47004010ad5dd42ec03e5ca075d832ef0c925e38..ebb4e95fbd741b5842b6c4a6320688f99bcf541a 100644 (file)
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-       if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
-               /* FPU state will be reallocated lazily at the first use. */
-               fpu__drop(fpu);
-       } else {
-               if (!fpu->fpstate_active) {
-                       fpu__activate_curr(fpu);
-                       user_fpu_begin();
-               }
+       fpu__drop(fpu);
+
+       /*
+        * Make sure fpstate is cleared and initialized.
+        */
+       if (static_cpu_has(X86_FEATURE_FPU)) {
+               fpu__activate_curr(fpu);
+               user_fpu_begin();
                copy_init_fpstate_to_fpregs();
        }
 }
index b6b2f0264af36ac272537e5d6689a38361bb60d4..2dabea46f03935f435493117c058a297f5011878 100644 (file)
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
 initial_pg_pmd:
        .fill 1024*KPMDS,4,0
 #else
-ENTRY(initial_page_table)
+.globl initial_page_table
+initial_page_table:
        .fill 1024,4,0
 #endif
 initial_pg_fixmap:
        .fill 1024,4,0
-ENTRY(empty_zero_page)
+.globl empty_zero_page
+empty_zero_page:
        .fill 4096,1,0
-ENTRY(swapper_pg_dir)
+.globl swapper_pg_dir
+swapper_pg_dir:
        .fill 1024,4,0
 EXPORT_SYMBOL(empty_zero_page)
 
index 764a29f84de7feea6346ce1d822af64f14af63ff..85195d447a922785857db0caacc73d6bc9b9490c 100644 (file)
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
 {
        struct platform_device *pd;
        struct resource res;
-       unsigned long len;
+       u64 base, size;
+       u32 length;
 
-       /* don't use lfb_size as it may contain the whole VMEM instead of only
-        * the part that is occupied by the framebuffer */
-       len = mode->height * mode->stride;
-       len = PAGE_ALIGN(len);
-       if (len > (u64)si->lfb_size << 16) {
+       /*
+        * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+        * upper half of the base address. Assemble the address, then make sure
+        * it is valid and we can actually access it.
+        */
+       base = si->lfb_base;
+       if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+               base |= (u64)si->ext_lfb_base << 32;
+       if (!base || (u64)(resource_size_t)base != base) {
+               printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Don't use lfb_size as IORESOURCE size, since it may contain the
+        * entire VMEM, and thus require huge mappings. Use just the part we
+        * need, that is, the part where the framebuffer is located. But verify
+        * that it does not exceed the advertised VMEM.
+        * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+        * historical reasons.
+        */
+       size = si->lfb_size;
+       if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+               size <<= 16;
+       length = mode->height * mode->stride;
+       length = PAGE_ALIGN(length);
+       if (length > size) {
                printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
                return -EINVAL;
        }
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
        memset(&res, 0, sizeof(res));
        res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        res.name = simplefb_resname;
-       res.start = si->lfb_base;
-       res.end = si->lfb_base + len - 1;
+       res.start = base;
+       res.end = res.start + length - 1;
        if (res.end <= res.start)
                return -EINVAL;
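
A worked example with made-up firmware values: if lfb_base = 0xe0000000, ext_lfb_base = 0x1 and VIDEO_CAPABILITY_64BIT_BASE is set, the assembled base is (0x1ULL << 32) | 0xe0000000 = 0x1e0000000, which survives the (resource_size_t) round-trip check only on a kernel with 64-bit resources. For VBE (VIDEO_TYPE_VLFB) an lfb_size of 0x100 advertises 0x100 << 16 = 16 MiB of VMEM, inside which the PAGE_ALIGN'ed height * stride length must fit.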
 
index 2d721e533cf48f1114b2d6cc8cacab07b2b31ebb..b80e8bf43cc63b4350185ab1125ee85f1c43d05f 100644 (file)
@@ -7,11 +7,13 @@
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
+       unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
        if (unwind_done(state))
                return 0;
 
        return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                    *state->sp, state->sp);
+                                    addr, state->sp);
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
                return false;
 
        do {
+               unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
                for (state->sp++; state->sp < info->end; state->sp++)
-                       if (__kernel_text_address(*state->sp))
+                       if (__kernel_text_address(addr))
                                return true;
 
                state->sp = info->next_sp;
index 79ae939970d3f49065fa4f0ed3dcc6d769ddf48a..fcd06f7526de31a6cd429e6d800ea93fcac294f9 100644 (file)
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
        if (early_recursion_flag > 2)
                goto halt_loop;
 
-       if (regs->cs != __KERNEL_CS)
+       /*
+        * Old CPUs leave the high bits of CS on the stack
+        * undefined.  I'm not sure which CPUs do this, but at least
+        * the 486 DX works this way.
+        */
+       if ((regs->cs & 0xFFFF) != __KERNEL_CS)
                goto fail;
 
        /*
index 429d08be7848a2df6334aef7e5e2c4e79e4b6d4a..dd6cfa4ad3ac35713da9c93951bd65bd70106a58 100644 (file)
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
new file mode 100644 (file)
index 0000000..3f1f1c7
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Intel Merrifield watchdog platform device library file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/io_apic.h>
+
+#define TANGIER_EXT_TIMER0_MSI 15
+
+static struct platform_device wdt_dev = {
+       .name = "intel_mid_wdt",
+       .id = -1,
+};
+
+static int tangier_probe(struct platform_device *pdev)
+{
+       int gsi;
+       struct irq_alloc_info info;
+       struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+
+       if (!pdata)
+               return -EINVAL;
+
+       /* IOAPIC builds identity mapping between GSI and IRQ on MID */
+       gsi = pdata->irq;
+       ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+       if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
+               dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
+                        gsi);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct intel_mid_wdt_pdata tangier_pdata = {
+       .irq = TANGIER_EXT_TIMER0_MSI,
+       .probe = tangier_probe,
+};
+
+static int wdt_scu_status_change(struct notifier_block *nb,
+                                unsigned long code, void *data)
+{
+       if (code == SCU_DOWN) {
+               platform_device_unregister(&wdt_dev);
+               return 0;
+       }
+
+       return platform_device_register(&wdt_dev);
+}
+
+static struct notifier_block wdt_scu_notifier = {
+       .notifier_call  = wdt_scu_status_change,
+};
+
+static int __init register_mid_wdt(void)
+{
+       if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+               return -ENODEV;
+
+       wdt_dev.dev.platform_data = &tangier_pdata;
+
+       /*
+        * We need to be sure that the SCU IPC is ready before the watchdog
+        * device can be registered:
+        */
+       intel_scu_notifier_add(&wdt_scu_notifier);
+
+       return 0;
+}
+rootfs_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
deleted file mode 100644 (file)
index de73413..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * platform_wdt.c: Watchdog platform library file
- *
- * (C) Copyright 2014 Intel Corporation
- * Author: David Cohen <david.a.cohen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/intel-mid_wdt.h>
-#include <asm/intel-mid.h>
-#include <asm/io_apic.h>
-
-#define TANGIER_EXT_TIMER0_MSI 15
-
-static struct platform_device wdt_dev = {
-       .name = "intel_mid_wdt",
-       .id = -1,
-};
-
-static int tangier_probe(struct platform_device *pdev)
-{
-       int gsi;
-       struct irq_alloc_info info;
-       struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
-
-       if (!pdata)
-               return -EINVAL;
-
-       /* IOAPIC builds identity mapping between GSI and IRQ on MID */
-       gsi = pdata->irq;
-       ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
-       if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
-               dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
-                        gsi);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static struct intel_mid_wdt_pdata tangier_pdata = {
-       .irq = TANGIER_EXT_TIMER0_MSI,
-       .probe = tangier_probe,
-};
-
-static int __init register_mid_wdt(void)
-{
-       if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
-               wdt_dev.dev.platform_data = &tangier_pdata;
-               return platform_device_register(&wdt_dev);
-       }
-
-       return -ENODEV;
-}
-
-rootfs_initcall(register_mid_wdt);
index 05e21b46443300e76a0663d37c4a3cb071254270..d19b09cdf284d93dc63820a7cfc648217b220a7d 100644 (file)
@@ -214,7 +214,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
-       if (!result) {
+       if (!result && !ctx->more) {
                err = af_alg_wait_for_completion(
                                crypto_ahash_init(&ctx->req),
                                &ctx->completion);
index 52ce17a3dd63079c3f5bb6eacde40ec1a61aa2ab..c16c94f88733e738f4ee1dff7a1334f9c2e39eb4 100644 (file)
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
 
        sg = scatterwalk_ffwd(tmp, sg, start);
 
-       if (sg_page(sg) == virt_to_page(buf) &&
-           sg->offset == offset_in_page(buf))
-               return;
-
        scatterwalk_start(&walk, sg);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
index edf3b96b3b737f0ec0aad0661bc0ded04177e58f..1d99292e2039ee5ff6e187e4e0cdbddf85d860e1 100644 (file)
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
        }
 
        /* register clk-provider */
-       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
        return;
 
index 0718e831475fdace5f7bd0c06190147da1fb3870..3b784b593afde7fea97650f938b03c55c79d8ac0 100644 (file)
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
        }
 
        /* register clk-provider */
-       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
        return;
 
index 8802a2dd56ac415c1576d62eb8a0bff0c81c0f9e..f674778fb3ac5912e05fb7448c6b1fb21eefa3d8 100644 (file)
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
        hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
                        "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
 
-       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
index 79596463e0d94b66a3135e7fe60d07cfcd023ad3..4a82a49cff5e604a290b395a84614fc5158dd27d 100644 (file)
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
 static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
                           0x050, 0, 3, axi_div_table, 0);
 
+#define SUN6I_A31_AHB1_REG  0x054
+
 static const char * const ahb1_parents[] = { "osc32k", "osc24M",
                                             "axi", "pll-periph" };
 
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
        val &= BIT(16);
        writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
 
+       /* Force AHB1 to PLL6 / 3 */
+       val = readl(reg + SUN6I_A31_AHB1_REG);
+       /* set PLL6 pre-div = 3 */
+       val &= ~GENMASK(7, 6);
+       val |= 0x2 << 6;
+       /* select PLL6 / pre-div */
+       val &= ~GENMASK(13, 12);
+       val |= 0x3 << 12;
+       writel(val, reg + SUN6I_A31_AHB1_REG);
+
        sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
 
        ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
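
Decoding the register writes above (the field semantics follow the in-line comments; the patch does not restate them elsewhere): bits [7:6] hold the PLL6 pre-divider minus one, so the value 0x2 selects divide-by-3, and bits [13:12] are the AHB1 clock mux, where 0x3 selects "PLL6 / pre-div". With PLL6 at an illustrative 600 MHz, the forced configuration therefore pins AHB1 at 600 / 3 = 200 MHz regardless of what the bootloader left in the register.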
index 838b22aa8b67fbabdfef5386485d5c3316adf296..f2c9274b8bd570c586b56d10ec17e829e6a710c7 100644 (file)
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
        else
                calcp = 3;
 
-       calcm = (req->parent_rate >> calcp) - 1;
+       calcm = (div >> calcp) - 1;
 
        req->rate = (req->parent_rate >> calcp) / (calcm + 1);
        req->m = calcm;
index dae35a96a694d0b6ffc3de5aae94e7344a69e565..02ca5dd978f664ad31cbe248674514e707ce143f 100644 (file)
@@ -34,6 +34,7 @@ struct amdgpu_atpx {
 
 static struct amdgpu_atpx_priv {
        bool atpx_detected;
+       bool bridge_pm_usable;
        /* handle for device - and atpx */
        acpi_handle dhandle;
        acpi_handle other_handle;
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
                printk("ATPX Hybrid Graphics\n");
-               atpx->functions.power_cntl = false;
+               /*
+                * Disable legacy PM methods only when pcie port PM is usable,
+                * otherwise the device might fail to power off or power on.
+                */
+               atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
                atpx->is_hybrid = true;
        }
 
@@ -480,6 +485,7 @@ static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
  */
 static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
+       struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
        acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
@@ -494,6 +500,7 @@ static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
        }
        amdgpu_atpx_priv.dhandle = dhandle;
        amdgpu_atpx_priv.atpx.handle = atpx_handle;
+       amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
        return true;
 }
 
index 13f2b705ea49811269ec409d476f4835c1efaa03..08cd0bd3ebe5b1e34b14940a4cff4805027518e1 100644 (file)
@@ -2984,19 +2984,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
        if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
                data->highest_mclk = memory_clock;
 
-       performance_level = &(ps->performance_levels
-                       [ps->performance_level_count++]);
-
        PP_ASSERT_WITH_CODE(
                        (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
                        "Performance levels exceeds SMC limit!",
                        return -EINVAL);
 
        PP_ASSERT_WITH_CODE(
-                       (ps->performance_level_count <=
+                       (ps->performance_level_count <
                                        hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-                       "Performance levels exceeds Driver limit!",
-                       return -EINVAL);
+                       "Performance levels exceeds Driver limit, Skip!",
+                       return 0);
+
+       performance_level = &(ps->performance_levels
+                       [ps->performance_level_count++]);
 
        /* Performance levels are arranged from low to high. */
        performance_level->memory_clock = memory_clock;
index 48019ae22ddba5fcbb3283e94a016543fba3d8b3..28341b32067f89e347c13db6f6a4b97e94022b96 100644 (file)
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
        clk_prepare_enable(hdlcd->clk);
        hdlcd_crtc_mode_set_nofb(crtc);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+       drm_crtc_vblank_on(crtc);
 }
 
 static void hdlcd_crtc_disable(struct drm_crtc *crtc)
 {
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
-       if (!crtc->state->active)
-               return;
-
+       drm_crtc_vblank_off(crtc);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
        clk_disable_unprepare(hdlcd->clk);
 }
index e8fb6ef947eea388ac0425054c6ce928bf453a5d..38eaa63afb31f61125314893d154ec03171cb3e1 100644 (file)
@@ -1907,6 +1907,8 @@ err_disable_pm_runtime:
 err_hdmiphy:
        if (hdata->hdmiphy_port)
                put_device(&hdata->hdmiphy_port->dev);
+       if (hdata->regs_hdmiphy)
+               iounmap(hdata->regs_hdmiphy);
 err_ddc:
        put_device(&hdata->ddc_adpt->dev);
 
@@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev)
        if (hdata->hdmiphy_port)
                put_device(&hdata->hdmiphy_port->dev);
 
+       if (hdata->regs_hdmiphy)
+               iounmap(hdata->regs_hdmiphy);
+
        put_device(&hdata->ddc_adpt->dev);
 
        return 0;
index f75c5b5a536c7a83cac6119a1d8a255792712172..c70310206ac56fd76eeb93ae76af669f1f2870a7 100644 (file)
@@ -251,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
-                              IRQF_TRIGGER_NONE, dev_name(dev), priv);
-       if (ret < 0) {
-               dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
-               return ret;
-       }
-
        comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
        if (comp_id < 0) {
                dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
@@ -273,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
+       ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+                              IRQF_TRIGGER_NONE, dev_name(dev), priv);
+       if (ret < 0) {
+               dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+               return ret;
+       }
+
        ret = component_add(dev, &mtk_disp_ovl_component_ops);
        if (ret)
                dev_err(dev, "Failed to add component: %d\n", ret);
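
The reordering above reads like a probe-time race fix (my interpretation; the hunks carry no changelog): devm_request_irq() arms the interrupt immediately, and the handler is handed priv, which was previously registered before priv had been fully initialized. In sketch form:

    platform_set_drvdata(pdev, priv);           /* priv fully set up first   */
    ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
                           IRQF_TRIGGER_NONE, dev_name(dev), priv);
                                                /* IRQ may fire from here on */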
index df33b3ca6ffd5b2038e3e01d81ada28ffd33462e..48cc01fd20c78db1533ff8da0edf6c76e7f163e4 100644 (file)
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
                          unsigned int bpc)
 {
        writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
-       writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE);
+       writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
        mtk_dither_set(comp, bpc, DISP_OD_CFG);
 }
 
index 28b2044ed9f285ad7c1bf1441822c3bcd24d9196..eaa5a2240c0c9db36b3ca918e589e117aee01077 100644 (file)
@@ -86,7 +86,7 @@
 
 #define DSI_PHY_TIMECON0       0x110
 #define LPX                            (0xff << 0)
-#define HS_PRPR                                (0xff << 8)
+#define HS_PREP                                (0xff << 8)
 #define HS_ZERO                                (0xff << 16)
 #define HS_TRAIL                       (0xff << 24)
 
 #define CLK_TRAIL                      (0xff << 24)
 
 #define DSI_PHY_TIMECON3       0x11c
-#define CLK_HS_PRPR                    (0xff << 0)
+#define CLK_HS_PREP                    (0xff << 0)
 #define CLK_HS_POST                    (0xff << 8)
 #define CLK_HS_EXIT                    (0xff << 16)
 
+#define T_LPX          5
+#define T_HS_PREP      6
+#define T_HS_TRAIL     8
+#define T_HS_EXIT      7
+#define T_HS_ZERO      10
+
 #define NS_TO_CYCLE(n, c)    ((n) / (c) + (((n) % (c)) ? 1 : 0))
 
 struct phy;
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
 static void dsi_phy_timconfig(struct mtk_dsi *dsi)
 {
        u32 timcon0, timcon1, timcon2, timcon3;
-       unsigned int ui, cycle_time;
-       unsigned int lpx;
+       u32 ui, cycle_time;
 
        ui = 1000 / dsi->data_rate + 0x01;
        cycle_time = 8000 / dsi->data_rate + 0x01;
-       lpx = 5;
 
-       timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
-       timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
-                 (4 * lpx);
+       timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
+       timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
+                 T_HS_EXIT << 24;
        timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
                  (NS_TO_CYCLE(0x150, cycle_time) << 16);
-       timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
-                  NS_TO_CYCLE(0x40, cycle_time);
+       timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
+                 NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
 
        writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
        writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
 {
        struct device *dev = dsi->dev;
        int ret;
+       u64 pixel_clock, total_bits;
+       u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
 
        if (++dsi->refcount != 1)
                return 0;
 
+       switch (dsi->format) {
+       case MIPI_DSI_FMT_RGB565:
+               bit_per_pixel = 16;
+               break;
+       case MIPI_DSI_FMT_RGB666_PACKED:
+               bit_per_pixel = 18;
+               break;
+       case MIPI_DSI_FMT_RGB666:
+       case MIPI_DSI_FMT_RGB888:
+       default:
+               bit_per_pixel = 24;
+               break;
+       }
+
        /**
-        * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio;
-        * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000.
-        * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi.
-        * we set mipi_ratio is 1.05.
+        * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+        * htotal_time = htotal * byte_per_pixel / num_lanes
+        * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+        * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+        * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
         */
-       dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
+       pixel_clock = dsi->vm.pixelclock * 1000;
+       htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+                       dsi->vm.hsync_len;
+       htotal_bits = htotal * bit_per_pixel;
+
+       overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+                       T_HS_EXIT;
+       overhead_bits = overhead_cycles * dsi->lanes * 8;
+       total_bits = htotal_bits + overhead_bits;
+
+       dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+                                         htotal * dsi->lanes);
 
-       ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+       ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
        if (ret < 0) {
                dev_err(dev, "Failed to set data rate: %d\n", ret);
                goto err_refcount;
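
Plugging illustrative numbers into the formula from the comment above (all values invented for the example): a 1080p panel with a 148500 kHz pixel clock, RGB888 (24 bits per pixel), 4 lanes and htotal = 2200 gives htotal_bits = 2200 * 24 = 52800 and overhead_bits = (5 + 6 + 10 + 8 + 7) * 4 * 8 = 1152, so data_rate = 148500000 * (52800 + 1152) / (2200 * 4) ≈ 910 Mbit/s per lane, versus the 891 Mbit/s the payload alone would need.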
index 2fdcd04bc93f7b9c6abf5d84752836e154d566b0..4129b12521a67e9296582353a74f418bbf84d1c6 100644 (file)
@@ -34,6 +34,7 @@ struct radeon_atpx {
 
 static struct radeon_atpx_priv {
        bool atpx_detected;
+       bool bridge_pm_usable;
        /* handle for device - and atpx */
        acpi_handle dhandle;
        struct radeon_atpx atpx;
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
                printk("ATPX Hybrid Graphics\n");
-               atpx->functions.power_cntl = false;
+               /*
+                * Disable legacy PM methods only when pcie port PM is usable,
+                * otherwise the device might fail to power off or power on.
+                */
+               atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
                atpx->is_hybrid = true;
        }
 
@@ -474,6 +479,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
+       struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
        acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
@@ -487,6 +493,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx.handle = atpx_handle;
+       radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
        return true;
 }
 
index 086d8a50715789d2237d19a59d8e24f407d35e23..60d30203a5faf9c7b69dcddb09ec0a086ca96a08 100644 (file)
 #include <linux/usb/ch9.h>
 #include "hid-ids.h"
 
+#define CP2112_REPORT_MAX_LENGTH               64
+#define CP2112_GPIO_CONFIG_LENGTH              5
+#define CP2112_GPIO_GET_LENGTH                 2
+#define CP2112_GPIO_SET_LENGTH                 3
+
 enum {
        CP2112_GPIO_CONFIG              = 0x02,
        CP2112_GPIO_GET                 = 0x03,
@@ -161,6 +166,8 @@ struct cp2112_device {
        atomic_t read_avail;
        atomic_t xfer_avail;
        struct gpio_chip gc;
+       u8 *in_out_buffer;
+       spinlock_t lock;
 };
 
 static int gpio_push_pull = 0xFF;
@@ -171,62 +178,86 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
-       u8 buf[5];
+       u8 *buf = dev->in_out_buffer;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&dev->lock, flags);
+
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
-                                      sizeof(buf), HID_FEATURE_REPORT,
-                                      HID_REQ_GET_REPORT);
-       if (ret != sizeof(buf)) {
+                                CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret != CP2112_GPIO_CONFIG_LENGTH) {
                hid_err(hdev, "error requesting GPIO config: %d\n", ret);
-               return ret;
+               goto exit;
        }
 
        buf[1] &= ~(1 << offset);
        buf[2] = gpio_push_pull;
 
-       ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
-                                HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+       ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+                                CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_SET_REPORT);
        if (ret < 0) {
                hid_err(hdev, "error setting GPIO config: %d\n", ret);
-               return ret;
+               goto exit;
        }
 
-       return 0;
+       ret = 0;
+
+exit:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return ret <= 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
-       u8 buf[3];
+       u8 *buf = dev->in_out_buffer;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&dev->lock, flags);
+
        buf[0] = CP2112_GPIO_SET;
        buf[1] = value ? 0xff : 0;
        buf[2] = 1 << offset;
 
-       ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
-                                HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+       ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
+                                CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_SET_REPORT);
        if (ret < 0)
                hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
-       u8 buf[2];
+       u8 *buf = dev->in_out_buffer;
+       unsigned long flags;
        int ret;
 
-       ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
-                                      HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
-       if (ret != sizeof(buf)) {
+       spin_lock_irqsave(&dev->lock, flags);
+
+       ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+                                CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret != CP2112_GPIO_GET_LENGTH) {
                hid_err(hdev, "error requesting GPIO values: %d\n", ret);
-               return ret;
+               ret = ret < 0 ? ret : -EIO;
+               goto exit;
        }
 
-       return (buf[1] >> offset) & 1;
+       ret = (buf[1] >> offset) & 1;
+
+exit:
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       return ret;
 }
 
 static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 {
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
-       u8 buf[5];
+       u8 *buf = dev->in_out_buffer;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&dev->lock, flags);
+
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
-                                      sizeof(buf), HID_FEATURE_REPORT,
-                                      HID_REQ_GET_REPORT);
-       if (ret != sizeof(buf)) {
+                                CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret != CP2112_GPIO_CONFIG_LENGTH) {
                hid_err(hdev, "error requesting GPIO config: %d\n", ret);
-               return ret;
+               goto fail;
        }
 
        buf[1] |= 1 << offset;
        buf[2] = gpio_push_pull;
 
-       ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
-                                HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+       ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+                                CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+                                HID_REQ_SET_REPORT);
        if (ret < 0) {
                hid_err(hdev, "error setting GPIO config: %d\n", ret);
-               return ret;
+               goto fail;
        }
 
+       spin_unlock_irqrestore(&dev->lock, flags);
+
        /*
         * Set gpio value when output direction is already set,
         * as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        cp2112_gpio_set(chip, offset, value);
 
        return 0;
+
+fail:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return ret < 0 ? ret : -EIO;
 }
 
 static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct cp2112_smbus_config_report config;
        int ret;
 
+       dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
+                                         GFP_KERNEL);
+       if (!dev->in_out_buffer)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->lock);
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
@@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_power_normal;
        }
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               ret = -ENOMEM;
-               goto err_power_normal;
-       }
-
        hid_set_drvdata(hdev, (void *)dev);
        dev->hdev               = hdev;
        dev->adap.owner         = THIS_MODULE;
@@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        if (ret) {
                hid_err(hdev, "error registering i2c adapter\n");
-               goto err_free_dev;
+               goto err_power_normal;
        }
 
        hid_dbg(hdev, "adapter registered\n");
@@ -1123,8 +1169,6 @@ err_gpiochip_remove:
        gpiochip_remove(&dev->gc);
 err_free_i2c:
        i2c_del_adapter(&dev->adap);
-err_free_dev:
-       kfree(dev);
 err_power_normal:
        hid_hw_power(hdev, PM_HINT_NORMAL);
 err_hid_close:
@@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev)
         */
        hid_hw_close(hdev);
        hid_hw_stop(hdev);
-       kfree(dev);
 }
 
 static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
index 76f644deb0a75cab6d6810517d4196465be18314..c5c5fbe9d60577f44085d86a7fb5cf60efb6acd3 100644 (file)
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        /* Setup wireless link with Logitech Wii wheel */
        if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
-               unsigned char buf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+               const unsigned char cbuf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+               u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
 
-               ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
-                                       HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto err_free;
+               }
 
+               ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+                                       HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
                if (ret >= 0) {
                        /* insert a little delay of 10 jiffies ~ 40ms */
                        wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
                        buf[1] = 0xB2;
                        get_random_bytes(&buf[2], 2);
 
-                       ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+                       ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
                                        HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
                }
+               kfree(buf);
        }
 
        if (drv_data->quirks & LG_FF)
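
This hunk, the cp2112 changes above, and the magicmouse and rmi hunks below all address the same underlying problem; the shared rationale (an inference, since the hunks carry no changelog) is that hid_hw_raw_request() may hand its buffer to a DMA-capable transport, so the buffer must live on the heap rather than on the stack or in const data. cp2112 solves it with a preallocated per-device buffer serialized by a spinlock; the others kmemdup() a heap copy around each request, as in this distilled pattern:

    const u8 tmpl[] = { 0x00, 0xAF, 0x01 };     /* illustrative report */
    u8 *buf = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);

    if (!buf)
            return -ENOMEM;
    ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(tmpl),
                             HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
    kfree(buf);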
index d6fa496d0ca25c17035233315ba9f5d5ebddc4fd..20b40ad2632503754685b84cc07d8787a4a44515 100644 (file)
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
 static int magicmouse_probe(struct hid_device *hdev,
        const struct hid_device_id *id)
 {
-       __u8 feature[] = { 0xd7, 0x01 };
+       const u8 feature[] = { 0xd7, 0x01 };
+       u8 *buf;
        struct magicmouse_sc *msc;
        struct hid_report *report;
        int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
+       buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto err_stop_hw;
+       }
+
        /*
         * Some devices respond with 'invalid report id' when feature
         * report switching it into multitouch mode is sent to it.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
         * but there seems to be no other way of switching the mode.
         * Thus the super-ugly hacky success check below.
         */
-       ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+       ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
                                HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+       kfree(buf);
        if (ret != -EIO && ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
index 9cd2ca34a6be5583dbcd82a64feb018f66b20bf8..be89bcbf6a71b23f266c8bb0b38b0aa66cca1ae0 100644 (file)
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
 static int rmi_set_mode(struct hid_device *hdev, u8 mode)
 {
        int ret;
-       u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+       const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+       u8 *buf;
 
-       ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
+       buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
                        sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+       kfree(buf);
        if (ret < 0) {
                dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
                        ret);
index c5c3d6111729952a7c35ce37837e1289c6c00a58..60875625cbdff45725532b92a395fe7f329163ee 100644 (file)
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        __s32 value;
        int ret = 0;
 
+       memset(buffer, 0, buffer_size);
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield)) {
index 317ef63ee78999d673f85b62af14bed7e664549e..8d96a22647b396c5a8790775a883ff58eabce7ce 100644 (file)
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
        int i;
        tuner_dbg("%s called\n", __func__);
 
+       /* free allocated f/w string */
+       if (priv->fname != firmware_name)
+               kfree(priv->fname);
+       priv->fname = NULL;
+
+       priv->state = XC2028_NO_FIRMWARE;
+       memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
        if (!priv->firm)
                return;
 
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
 
        priv->firm = NULL;
        priv->firm_size = 0;
-       priv->state = XC2028_NO_FIRMWARE;
-
-       memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
 }
 
 static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
        return 0;
 
 fail:
-       priv->state = XC2028_NO_FIRMWARE;
+       free_firmware(priv);
 
-       memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
        if (retry_count < 8) {
                msleep(50);
                retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
        mutex_lock(&xc2028_list_mutex);
 
        /* only perform final cleanup if this is the last instance */
-       if (hybrid_tuner_report_instance_count(priv) == 1) {
+       if (hybrid_tuner_report_instance_count(priv) == 1)
                free_firmware(priv);
-               kfree(priv->ctrl.fname);
-               priv->ctrl.fname = NULL;
-       }
 
        if (priv)
                hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
 
        /*
         * Copy the config data.
-        * For the firmware name, keep a local copy of the string,
-        * in order to avoid troubles during device release.
         */
-       kfree(priv->ctrl.fname);
-       priv->ctrl.fname = NULL;
        memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
-       if (p->fname) {
-               priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
-               if (priv->ctrl.fname == NULL) {
-                       rc = -ENOMEM;
-                       goto unlock;
-               }
-       }
 
        /*
         * If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
 
        if (priv->state == XC2028_NO_FIRMWARE) {
                if (!firmware_name[0])
-                       priv->fname = priv->ctrl.fname;
+                       priv->fname = kstrdup(p->fname, GFP_KERNEL);
                else
                        priv->fname = firmware_name;
 
+               if (!priv->fname) {
+                       rc = -ENOMEM;
+                       goto unlock;
+               }
+
                rc = request_firmware_nowait(THIS_MODULE, 1,
                                             priv->fname,
                                             priv->i2c_props.adap->dev.parent,
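
Taken together, these hunks impose a single-owner rule on the firmware name: priv->fname points either at the static firmware_name module parameter or at a private kstrdup() copy, and free_firmware() is now the only place allowed to release it, with the "free allocated f/w string" lines guarding against kfree()'ing the static string. That is what lets the scattered kfree(priv->ctrl.fname) call sites be deleted.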
index 50a674be665586af87750896f7f01924d76a6e66..df478ae72e23235ca8939128b2ad37497aaf4b71 100644 (file)
@@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
        spin_unlock_irqrestore(&host->irq_lock, irqflags);
 
        if (host->dma_ops->start(host, sg_len)) {
+               host->dma_ops->stop(host);
                /* We can't do DMA, try PIO for this one */
                dev_dbg(host->dev,
                        "%s: fall back to PIO mode for current transfer\n",
index fb71c866eacc7028918e1abb667b230e784a47f4..1bb11e4a9fe53f7e01eb81bb87482f6bd96a56f1 100644 (file)
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
                        return ret;
                }
        }
+       /*
+        * The DAT[3:0] line signal levels and the CMD line signal level are
+        * not compatible with standard SDHC register. The line signal levels
+        * DAT[7:0] are at bits 31:24 and the command line signal level is at
+        * bit 23. All other bits are the same as in the standard SDHC
+        * register.
+        */
+       if (spec_reg == SDHCI_PRESENT_STATE) {
+               ret = value & 0x000fffff;
+               ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+               ret |= (value << 1) & SDHCI_CMD_LVL;
+               return ret;
+       }
+
        ret = value;
        return ret;
 }
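
The remap above can be checked in isolation. A minimal userspace sketch mirroring the masks and shifts added in this hunk (the raw register value below is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SDHCI_DATA_LVL_MASK	0x00F00000	/* standard DAT[3:0] levels, bits 23:20 */
#define SDHCI_CMD_LVL		0x01000000	/* standard CMD level, bit 24 */

/* Vendor layout: data line levels at bits 31:24 (DAT[3:0] at 27:24),
 * CMD level at bit 23; shift both into their standard SDHCI positions.
 */
static uint32_t present_state_fixup(uint32_t value)
{
	uint32_t ret = value & 0x000fffff;

	ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
	ret |= (value << 1) & SDHCI_CMD_LVL;
	return ret;
}

int main(void)
{
	/* hypothetical raw value: DAT[3:0] = 0xF at bits 27:24, CMD = 1 at bit 23 */
	printf("0x%08x\n", present_state_fixup(0x0F800000));	/* prints 0x01f00000 */
	return 0;
}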
index 766df17fb7eb039a8f6ef85cb9e44d6186ad9995..2570455b219a469c1669ff4d3f9935ca89e37c6f 100644 (file)
@@ -73,6 +73,7 @@
 #define  SDHCI_DATA_LVL_MASK   0x00F00000
 #define   SDHCI_DATA_LVL_SHIFT 20
 #define   SDHCI_DATA_0_LVL_MASK        0x00100000
+#define  SDHCI_CMD_LVL         0x01000000
 
 #define SDHCI_HOST_CONTROL     0x28
 #define  SDHCI_CTRL_LED                0x01
index 7717b19dc806bf1532ac263525a35ce643c7cdd6..947adda3397d64ce9e86f5cfe8e300b4e8650827 100644 (file)
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
 
                vl->members |= BIT(port) | BIT(cpu_port);
                if (untagged)
-                       vl->untag |= BIT(port) | BIT(cpu_port);
+                       vl->untag |= BIT(port);
                else
-                       vl->untag &= ~(BIT(port) | BIT(cpu_port));
+                       vl->untag &= ~BIT(port);
+               vl->untag &= ~BIT(cpu_port);
 
                b53_set_vlan_entry(dev, vid, vl);
                b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
        if (pvid) {
                b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
                            vlan->vid_end);
-               b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
-                           vlan->vid_end);
                b53_fast_age_vlan(dev, vid);
        }
 }
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
 {
        struct b53_device *dev = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-       unsigned int cpu_port = dev->cpu_port;
        struct b53_vlan *vl;
        u16 vid;
        u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
                b53_get_vlan_entry(dev, vid, vl);
 
                vl->members &= ~BIT(port);
-               if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
-                       vl->members = 0;
 
                if (pvid == vid) {
                        if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
                                pvid = 0;
                }
 
-               if (untagged) {
+               if (untagged)
                        vl->untag &= ~(BIT(port));
-                       if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
-                               vl->untag = 0;
-               }
 
                b53_set_vlan_entry(dev, vid, vl);
                b53_fast_age_vlan(dev, vid);
        }
 
        b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
-       b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
        b53_fast_age_vlan(dev, pvid);
 
        return 0;
index b0da9693f28a130a65e59a007548c773fa422e7f..be865b4dada2c65e7089ca4147ef47758698dc51 100644 (file)
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
                if (ndev->flags & IFF_ALLMULTI) {
                        arc_reg_set(priv, R_LAFL, ~0);
                        arc_reg_set(priv, R_LAFH, ~0);
-               } else {
+               } else if (ndev->flags & IFF_MULTICAST) {
                        struct netdev_hw_addr *ha;
                        unsigned int filter[2] = { 0, 0 };
                        int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 
                        arc_reg_set(priv, R_LAFL, filter[0]);
                        arc_reg_set(priv, R_LAFH, filter[1]);
+               } else {
+                       arc_reg_set(priv, R_LAFL, 0);
+                       arc_reg_set(priv, R_LAFH, 0);
                }
        }
 }
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
        ndev->netdev_ops = &arc_emac_netdev_ops;
        ndev->ethtool_ops = &arc_emac_ethtool_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;
-       /* FIXME :: no multicast support yet */
-       ndev->flags &= ~IFF_MULTICAST;
 
        priv = netdev_priv(ndev);
        priv->dev = dev;
index c6909660e097b010fe30c3371fefa253fd71e5d5..e18635b2a002d505be7bf330793f364b49c2eb0d 100644 (file)
@@ -4934,6 +4934,10 @@ static void bnxt_del_napi(struct bnxt *bp)
                napi_hash_del(&bnapi->napi);
                netif_napi_del(&bnapi->napi);
        }
+       /* We called napi_hash_del() before netif_napi_del(), so we need
+        * to respect an RCU grace period before freeing napi structures.
+        */
+       synchronize_net();
 }
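
The comment added here is an instance of the standard RCU unpublish-then-free rule; synchronize_net() is the networking wrapper around synchronize_rcu(). The generic shape, as a kernel-context sketch (obj and hash_node are illustrative names, not bnxt code):

	hlist_del_init_rcu(&obj->hash_node);	/* unpublish; napi_hash_del() does this */
	synchronize_rcu();			/* wait out all concurrent RCU readers */
	kfree(obj);				/* no reader can still see the object */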
 
 static void bnxt_init_napi(struct bnxt *bp)
index b32444a3ed79d311fc4047aa10eabc5f630b66cb..533653bd7aec99781af410a6c10be47d2ffc6fc0 100644 (file)
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
                lp->skb_length = skb->len;
                lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
                                                        DMA_TO_DEVICE);
+               if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+                       dev_kfree_skb_any(skb);
+                       dev->stats.tx_dropped++;
+                       netdev_err(dev, "%s: DMA mapping error\n", __func__);
+                       return NETDEV_TX_OK;
+               }
 
                /* Set address of the data in the Transmit Address register */
                macb_writel(lp, TAR, lp->skb_physaddr);
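
The new check follows the general DMA API rule that every dma_map_single() must be paired with a dma_mapping_error() test before the handle is handed to hardware. A generic transmit-path sketch (not macb-specific code):

	dma_addr_t handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle)) {
		dev_kfree_skb_any(skb);	/* drop; never pass a bad handle to the NIC */
		return NETDEV_TX_OK;	/* skb consumed, do not requeue */
	}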
index 30426109711cf4ebec9f1cc28fed75e7961d79a0..86bd93ce2ea30fc66f1c9c63fdf5a5d6209013a2 100644 (file)
@@ -47,7 +47,7 @@
 
 /* Min/Max packet size */
 #define        NIC_HW_MIN_FRS                  64
-#define        NIC_HW_MAX_FRS                  9200 /* 9216 max packet including FCS */
+#define        NIC_HW_MAX_FRS                  9190 /* Excluding L2 header and FCS */
 
 /* Max pkinds */
 #define        NIC_MAX_PKIND                   16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
 
 struct nicvf_hw_stats {
        u64 rx_bytes;
+       u64 rx_frames;
        u64 rx_ucast_frames;
        u64 rx_bcast_frames;
        u64 rx_mcast_frames;
-       u64 rx_fcs_errors;
-       u64 rx_l2_errors;
+       u64 rx_drops;
        u64 rx_drop_red;
        u64 rx_drop_red_bytes;
        u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
        u64 rx_drop_mcast;
        u64 rx_drop_l3_bcast;
        u64 rx_drop_l3_mcast;
+       u64 rx_fcs_errors;
+       u64 rx_l2_errors;
+
+       u64 tx_bytes;
+       u64 tx_frames;
+       u64 tx_ucast_frames;
+       u64 tx_bcast_frames;
+       u64 tx_mcast_frames;
+       u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+       /* CQE Rx errs */
        u64 rx_bgx_truncated_pkts;
        u64 rx_jabber_errs;
        u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
        u64 rx_l4_pclp;
        u64 rx_truncated_pkts;
 
-       u64 tx_bytes_ok;
-       u64 tx_ucast_frames_ok;
-       u64 tx_bcast_frames_ok;
-       u64 tx_mcast_frames_ok;
-       u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
-       /* Rx */
-       u64 rx_frames_ok;
-       u64 rx_frames_64;
-       u64 rx_frames_127;
-       u64 rx_frames_255;
-       u64 rx_frames_511;
-       u64 rx_frames_1023;
-       u64 rx_frames_1518;
-       u64 rx_frames_jumbo;
-       u64 rx_drops;
-
+       /* CQE Tx errs */
+       u64 tx_desc_fault;
+       u64 tx_hdr_cons_err;
+       u64 tx_subdesc_err;
+       u64 tx_max_size_exceeded;
+       u64 tx_imm_size_oflow;
+       u64 tx_data_seq_err;
+       u64 tx_mem_seq_err;
+       u64 tx_lock_viol;
+       u64 tx_data_fault;
+       u64 tx_tstmp_conflict;
+       u64 tx_tstmp_timeout;
+       u64 tx_mem_fault;
+       u64 tx_csum_overlap;
+       u64 tx_csum_overflow;
+
+       /* driver debug stats */
        u64 rcv_buffer_alloc_failures;
-
-       /* Tx */
-       u64 tx_frames_ok;
-       u64 tx_drops;
        u64 tx_tso;
        u64 tx_timeout;
        u64 txq_stop;
        u64 txq_wake;
+
+       struct u64_stats_sync   syncp;
 };
 
 struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
 
        u8                      node;
        u8                      cpi_alg;
-       u16                     mtu;
        bool                    link_up;
        u8                      duplex;
        u32                     speed;
@@ -298,7 +306,7 @@ struct nicvf {
 
        /* Stats */
        struct nicvf_hw_stats   hw_stats;
-       struct nicvf_drv_stats  drv_stats;
+       struct nicvf_drv_stats  __percpu *drv_stats;
        struct bgx_stats        bgx_stats;
 
        /* MSI-X  */
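
This hunk turns the driver stats into per-CPU storage so hot-path increments need no locking. The lifecycle in the shape this series uses, as a kernel-context sketch (struct and field names are illustrative; netdev_alloc_pcpu_stats() requires the syncp member):

	struct demo_stats {
		u64 tx_tso;
		struct u64_stats_sync syncp;	/* set up by netdev_alloc_pcpu_stats() */
	};

	struct demo_stats __percpu *stats;
	u64 total = 0;
	int cpu;

	stats = netdev_alloc_pcpu_stats(struct demo_stats);	/* zeroed on all CPUs */
	this_cpu_inc(stats->tx_tso);				/* hot path: lockless */

	for_each_possible_cpu(cpu)				/* slow path: fold counters */
		total += per_cpu_ptr(stats, cpu)->tx_tso;

	free_percpu(stats);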
index 2bbf4cbf08b21f273100d589f6893eb2e04fd3b4..6677b96e1f3f6e033ac985847e3b65b27db17842 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
 #include <linux/of.h>
+#include <linux/if_vlan.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
 /* Update hardware min/max frame size */
 static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
 {
-       if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
-               dev_err(&nic->pdev->dev,
-                       "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
-                          vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+       int bgx, lmac, lmac_cnt;
+       u64 lmac_credits;
+
+       if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
                return 1;
-       }
-       new_frs += ETH_HLEN;
-       if (new_frs <= nic->pkind.maxlen)
-               return 0;
 
-       nic->pkind.maxlen = new_frs;
-       nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+       bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+       lmac += bgx * MAX_LMAC_PER_BGX;
+
+       new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+       /* Update corresponding LMAC credits */
+       lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+       lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+       lmac_credits &= ~(0xFFFFFULL << 12);
+       lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+       nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+       /* Enforce MTU in HW
+        * This config is supported only from 88xx pass 2.0 onwards.
+        */
+       if (!pass1_silicon(nic->pdev))
+               nic_reg_write(nic,
+                             NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
        return 0;
 }
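
The credit arithmetic is easier to see with numbers. A worked userspace sketch with hypothetical values: four LMACs sharing the 48 KB FIFO, and new_frs = 9190 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) + 4 = 9216, matching the pkind.maxlen computation in the next hunk:

#include <stdio.h>

int main(void)
{
	int lmac_cnt = 4;			/* hypothetical LMAC count on this BGX */
	int new_frs  = 9190 + 18 + 4 + 4;	/* = 9216 bytes on the wire */
	long credits = ((48 * 1024) / lmac_cnt - new_frs) / 16;

	printf("credit units = %ld\n", credits);	/* (12288 - 9216) / 16 = 192 */
	return 0;
}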
 
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
 
        /* PKIND configuration */
        nic->pkind.minlen = 0;
-       nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+       nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
        nic->pkind.lenerr_en = 1;
        nic->pkind.rx_hdr = 0;
        nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
                        nic_reg_write(nic, reg_addr, 0);
                }
        }
+
        return 0;
 }
 
index edf779f5a227022df6069e267ef697ef05e784c8..80d46337cf29183a3a0911b0d94ff23ad08f3418 100644 (file)
 #define   NIC_PF_MPI_0_2047_CFG                        (0x210000)
 #define   NIC_PF_RSSI_0_4097_RQ                        (0x220000)
 #define   NIC_PF_LMAC_0_7_CFG                  (0x240000)
+#define   NIC_PF_LMAC_0_7_CFG2                 (0x240100)
 #define   NIC_PF_LMAC_0_7_SW_XOFF              (0x242000)
 #define   NIC_PF_LMAC_0_7_CREDIT               (0x244000)
 #define   NIC_PF_CHAN_0_255_TX_CFG             (0x400000)
index ad4fddb5542160b4512643cde5228f7a9d28b2e0..432bf6be57cb2ff596d43ec08f2ff0aa250d4b86 100644 (file)
@@ -36,11 +36,11 @@ struct nicvf_stat {
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
        NICVF_HW_STAT(rx_bytes),
+       NICVF_HW_STAT(rx_frames),
        NICVF_HW_STAT(rx_ucast_frames),
        NICVF_HW_STAT(rx_bcast_frames),
        NICVF_HW_STAT(rx_mcast_frames),
-       NICVF_HW_STAT(rx_fcs_errors),
-       NICVF_HW_STAT(rx_l2_errors),
+       NICVF_HW_STAT(rx_drops),
        NICVF_HW_STAT(rx_drop_red),
        NICVF_HW_STAT(rx_drop_red_bytes),
        NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
        NICVF_HW_STAT(rx_drop_mcast),
        NICVF_HW_STAT(rx_drop_l3_bcast),
        NICVF_HW_STAT(rx_drop_l3_mcast),
-       NICVF_HW_STAT(rx_bgx_truncated_pkts),
-       NICVF_HW_STAT(rx_jabber_errs),
-       NICVF_HW_STAT(rx_fcs_errs),
-       NICVF_HW_STAT(rx_bgx_errs),
-       NICVF_HW_STAT(rx_prel2_errs),
-       NICVF_HW_STAT(rx_l2_hdr_malformed),
-       NICVF_HW_STAT(rx_oversize),
-       NICVF_HW_STAT(rx_undersize),
-       NICVF_HW_STAT(rx_l2_len_mismatch),
-       NICVF_HW_STAT(rx_l2_pclp),
-       NICVF_HW_STAT(rx_ip_ver_errs),
-       NICVF_HW_STAT(rx_ip_csum_errs),
-       NICVF_HW_STAT(rx_ip_hdr_malformed),
-       NICVF_HW_STAT(rx_ip_payload_malformed),
-       NICVF_HW_STAT(rx_ip_ttl_errs),
-       NICVF_HW_STAT(rx_l3_pclp),
-       NICVF_HW_STAT(rx_l4_malformed),
-       NICVF_HW_STAT(rx_l4_csum_errs),
-       NICVF_HW_STAT(rx_udp_len_errs),
-       NICVF_HW_STAT(rx_l4_port_errs),
-       NICVF_HW_STAT(rx_tcp_flag_errs),
-       NICVF_HW_STAT(rx_tcp_offset_errs),
-       NICVF_HW_STAT(rx_l4_pclp),
-       NICVF_HW_STAT(rx_truncated_pkts),
-       NICVF_HW_STAT(tx_bytes_ok),
-       NICVF_HW_STAT(tx_ucast_frames_ok),
-       NICVF_HW_STAT(tx_bcast_frames_ok),
-       NICVF_HW_STAT(tx_mcast_frames_ok),
+       NICVF_HW_STAT(rx_fcs_errors),
+       NICVF_HW_STAT(rx_l2_errors),
+       NICVF_HW_STAT(tx_bytes),
+       NICVF_HW_STAT(tx_frames),
+       NICVF_HW_STAT(tx_ucast_frames),
+       NICVF_HW_STAT(tx_bcast_frames),
+       NICVF_HW_STAT(tx_mcast_frames),
+       NICVF_HW_STAT(tx_drops),
 };
 
 static const struct nicvf_stat nicvf_drv_stats[] = {
-       NICVF_DRV_STAT(rx_frames_ok),
-       NICVF_DRV_STAT(rx_frames_64),
-       NICVF_DRV_STAT(rx_frames_127),
-       NICVF_DRV_STAT(rx_frames_255),
-       NICVF_DRV_STAT(rx_frames_511),
-       NICVF_DRV_STAT(rx_frames_1023),
-       NICVF_DRV_STAT(rx_frames_1518),
-       NICVF_DRV_STAT(rx_frames_jumbo),
-       NICVF_DRV_STAT(rx_drops),
+       NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+       NICVF_DRV_STAT(rx_jabber_errs),
+       NICVF_DRV_STAT(rx_fcs_errs),
+       NICVF_DRV_STAT(rx_bgx_errs),
+       NICVF_DRV_STAT(rx_prel2_errs),
+       NICVF_DRV_STAT(rx_l2_hdr_malformed),
+       NICVF_DRV_STAT(rx_oversize),
+       NICVF_DRV_STAT(rx_undersize),
+       NICVF_DRV_STAT(rx_l2_len_mismatch),
+       NICVF_DRV_STAT(rx_l2_pclp),
+       NICVF_DRV_STAT(rx_ip_ver_errs),
+       NICVF_DRV_STAT(rx_ip_csum_errs),
+       NICVF_DRV_STAT(rx_ip_hdr_malformed),
+       NICVF_DRV_STAT(rx_ip_payload_malformed),
+       NICVF_DRV_STAT(rx_ip_ttl_errs),
+       NICVF_DRV_STAT(rx_l3_pclp),
+       NICVF_DRV_STAT(rx_l4_malformed),
+       NICVF_DRV_STAT(rx_l4_csum_errs),
+       NICVF_DRV_STAT(rx_udp_len_errs),
+       NICVF_DRV_STAT(rx_l4_port_errs),
+       NICVF_DRV_STAT(rx_tcp_flag_errs),
+       NICVF_DRV_STAT(rx_tcp_offset_errs),
+       NICVF_DRV_STAT(rx_l4_pclp),
+       NICVF_DRV_STAT(rx_truncated_pkts),
+
+       NICVF_DRV_STAT(tx_desc_fault),
+       NICVF_DRV_STAT(tx_hdr_cons_err),
+       NICVF_DRV_STAT(tx_subdesc_err),
+       NICVF_DRV_STAT(tx_max_size_exceeded),
+       NICVF_DRV_STAT(tx_imm_size_oflow),
+       NICVF_DRV_STAT(tx_data_seq_err),
+       NICVF_DRV_STAT(tx_mem_seq_err),
+       NICVF_DRV_STAT(tx_lock_viol),
+       NICVF_DRV_STAT(tx_data_fault),
+       NICVF_DRV_STAT(tx_tstmp_conflict),
+       NICVF_DRV_STAT(tx_tstmp_timeout),
+       NICVF_DRV_STAT(tx_mem_fault),
+       NICVF_DRV_STAT(tx_csum_overlap),
+       NICVF_DRV_STAT(tx_csum_overflow),
+
        NICVF_DRV_STAT(rcv_buffer_alloc_failures),
-       NICVF_DRV_STAT(tx_frames_ok),
        NICVF_DRV_STAT(tx_tso),
-       NICVF_DRV_STAT(tx_drops),
        NICVF_DRV_STAT(tx_timeout),
        NICVF_DRV_STAT(txq_stop),
        NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
                                    struct ethtool_stats *stats, u64 *data)
 {
        struct nicvf *nic = netdev_priv(netdev);
-       int stat;
-       int sqs;
+       int stat, tmp_stats;
+       int sqs, cpu;
 
        nicvf_update_stats(nic);
 
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
        for (stat = 0; stat < nicvf_n_hw_stats; stat++)
                *(data++) = ((u64 *)&nic->hw_stats)
                                [nicvf_hw_stats[stat].index];
-       for (stat = 0; stat < nicvf_n_drv_stats; stat++)
-               *(data++) = ((u64 *)&nic->drv_stats)
-                               [nicvf_drv_stats[stat].index];
+       for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+               tmp_stats = 0;
+               for_each_possible_cpu(cpu)
+                       tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+                                    [nicvf_drv_stats[stat].index];
+               *(data++) = tmp_stats;
+       }
 
        nicvf_get_qset_stats(nic, stats, &data);
 
index 45a13f718863f33a6ccc7f14a37f2f2fc117be84..8a37012c9c89637560fc3ae7554718b0f96163e4 100644 (file)
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
                return qidx;
 }
 
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
-                                         struct sk_buff *skb)
-{
-       if (skb->len <= 64)
-               nic->drv_stats.rx_frames_64++;
-       else if (skb->len <= 127)
-               nic->drv_stats.rx_frames_127++;
-       else if (skb->len <= 255)
-               nic->drv_stats.rx_frames_255++;
-       else if (skb->len <= 511)
-               nic->drv_stats.rx_frames_511++;
-       else if (skb->len <= 1023)
-               nic->drv_stats.rx_frames_1023++;
-       else if (skb->len <= 1518)
-               nic->drv_stats.rx_frames_1518++;
-       else
-               nic->drv_stats.rx_frames_jumbo++;
-}
-
 /* The Cavium ThunderX network controller can *only* be found in SoCs
  * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
  * registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
 static int nicvf_init_resources(struct nicvf *nic)
 {
        int err;
-       union nic_mbx mbx = {};
-
-       mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
 
        /* Enable Qset */
        nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
                return err;
        }
 
-       /* Send VF config done msg to PF */
-       nicvf_write_to_mbx(nic, &mbx);
-
        return 0;
 }
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
-                                 struct cmp_queue *cq,
                                  struct cqe_send_t *cqe_tx,
                                  int cqe_type, int budget,
                                  unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
                   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
                   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
 
-       nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+       nicvf_check_cqe_tx_errs(nic, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
        if (skb) {
                /* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
                return;
        }
 
-       nicvf_set_rx_frame_cnt(nic, skb);
-
        nicvf_set_rxhash(netdev, cqe_rx, skb);
 
        skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
                        work_done++;
                break;
                case CQE_TYPE_SEND:
-                       nicvf_snd_pkt_handler(netdev, cq,
+                       nicvf_snd_pkt_handler(netdev,
                                              (void *)cq_desc, CQE_TYPE_SEND,
                                              budget, &tx_pkts, &tx_bytes);
                        tx_done++;
@@ -740,7 +712,7 @@ done:
                nic = nic->pnicvf;
                if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
                        netif_tx_start_queue(txq);
-                       nic->drv_stats.txq_wake++;
+                       this_cpu_inc(nic->drv_stats->txq_wake);
                        if (netif_msg_tx_err(nic))
                                netdev_warn(netdev,
                                            "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
                netif_tx_stop_queue(txq);
-               nic->drv_stats.txq_stop++;
+               this_cpu_inc(nic->drv_stats->txq_stop);
                if (netif_msg_tx_err(nic))
                        netdev_warn(netdev,
                                    "%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
        return 0;
 }
 
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+       union nic_mbx mbx = {};
+
+       mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+       mbx.frs.max_frs = mtu;
+       mbx.frs.vf_id = nic->vf_id;
+
+       return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
-       int err, qidx;
+       int cpu, err, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;
-
-       nic->mtu = netdev->mtu;
+       union nic_mbx mbx = {};
 
        netif_carrier_off(netdev);
 
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
        if (nic->sqs_mode)
                nicvf_get_primary_vf_struct(nic);
 
-       /* Configure receive side scaling */
-       if (!nic->sqs_mode)
+       /* Configure receive side scaling and MTU */
+       if (!nic->sqs_mode) {
                nicvf_rss_init(nic);
+               if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+                       goto cleanup;
+
+               /* Clear percpu stats */
+               for_each_possible_cpu(cpu)
+                       memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+                              sizeof(struct nicvf_drv_stats));
+       }
 
        err = nicvf_register_interrupts(nic);
        if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
-       nic->drv_stats.txq_stop = 0;
-       nic->drv_stats.txq_wake = 0;
+       /* Send VF config done msg to PF */
+       mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+       nicvf_write_to_mbx(nic, &mbx);
 
        return 0;
 cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
        return err;
 }
 
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
-       union nic_mbx mbx = {};
-
-       mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
-       mbx.frs.max_frs = mtu;
-       mbx.frs.vf_id = nic->vf_id;
-
-       return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
        if (new_mtu < NIC_HW_MIN_FRS)
                return -EINVAL;
 
+       netdev->mtu = new_mtu;
+
+       if (!netif_running(netdev))
+               return 0;
+
        if (nicvf_update_hw_max_frs(nic, new_mtu))
                return -EINVAL;
-       netdev->mtu = new_mtu;
-       nic->mtu = new_mtu;
 
        return 0;
 }
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
 
 void nicvf_update_stats(struct nicvf *nic)
 {
-       int qidx;
+       int qidx, cpu;
+       u64 tmp_stats = 0;
        struct nicvf_hw_stats *stats = &nic->hw_stats;
-       struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+       struct nicvf_drv_stats *drv_stats;
        struct queue_set *qs = nic->qs;
 
 #define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
        stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
 
-       stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
-       stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
-       stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
-       stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+       stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+       stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+       stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+       stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
        stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-       drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
-                                 stats->tx_bcast_frames_ok +
-                                 stats->tx_mcast_frames_ok;
-       drv_stats->rx_frames_ok = stats->rx_ucast_frames +
-                                 stats->rx_bcast_frames +
-                                 stats->rx_mcast_frames;
-       drv_stats->rx_drops = stats->rx_drop_red +
-                             stats->rx_drop_overrun;
-       drv_stats->tx_drops = stats->tx_drops;
+       /* On T88 pass 2.0, the dummy SQE added for TSO notification
+        * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+        * to by dummy SQE and results in tx_drops counter being
+        * incremented. Subtracting it from tx_tso counter will give
+        * exact tx_drops counter.
+        */
+       if (nic->t88 && nic->hw_tso) {
+               for_each_possible_cpu(cpu) {
+                       drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+                       tmp_stats += drv_stats->tx_tso;
+               }
+               stats->tx_drops = tmp_stats - stats->tx_drops;
+       }
+       stats->tx_frames = stats->tx_ucast_frames +
+                          stats->tx_bcast_frames +
+                          stats->tx_mcast_frames;
+       stats->rx_frames = stats->rx_ucast_frames +
+                          stats->rx_bcast_frames +
+                          stats->rx_mcast_frames;
+       stats->rx_drops = stats->rx_drop_red +
+                         stats->rx_drop_overrun;
 
        /* Update RQ and SQ stats */
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
 {
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
-       struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
        nicvf_update_stats(nic);
 
        stats->rx_bytes = hw_stats->rx_bytes;
-       stats->rx_packets = drv_stats->rx_frames_ok;
-       stats->rx_dropped = drv_stats->rx_drops;
+       stats->rx_packets = hw_stats->rx_frames;
+       stats->rx_dropped = hw_stats->rx_drops;
        stats->multicast = hw_stats->rx_mcast_frames;
 
-       stats->tx_bytes = hw_stats->tx_bytes_ok;
-       stats->tx_packets = drv_stats->tx_frames_ok;
-       stats->tx_dropped = drv_stats->tx_drops;
+       stats->tx_bytes = hw_stats->tx_bytes;
+       stats->tx_packets = hw_stats->tx_frames;
+       stats->tx_dropped = hw_stats->tx_drops;
 
        return stats;
 }
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
                netdev_warn(dev, "%s: Transmit timed out, resetting\n",
                            dev->name);
 
-       nic->drv_stats.tx_timeout++;
+       this_cpu_inc(nic->drv_stats->tx_timeout);
        schedule_work(&nic->reset_task);
 }
 
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_free_netdev;
        }
 
+       nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+       if (!nic->drv_stats) {
+               err = -ENOMEM;
+               goto err_free_netdev;
+       }
+
        err = nicvf_set_qset_resources(nic);
        if (err)
                goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
        nicvf_unregister_interrupts(nic);
 err_free_netdev:
        pci_set_drvdata(pdev, NULL);
+       if (nic->drv_stats)
+               free_percpu(nic->drv_stats);
        free_netdev(netdev);
 err_release_regions:
        pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
                unregister_netdev(pnetdev);
        nicvf_unregister_interrupts(nic);
        pci_set_drvdata(pdev, NULL);
+       if (nic->drv_stats)
+               free_percpu(nic->drv_stats);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index a4fc501558817639fbc0da5cb9bb388f0f49a51f..747ef08829763db89b8fb1802eab9b47a0eef6b0 100644 (file)
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
                                           order);
                if (!nic->rb_page) {
-                       nic->drv_stats.rcv_buffer_alloc_failures++;
+                       this_cpu_inc(nic->pnicvf->drv_stats->
+                                    rcv_buffer_alloc_failures);
                        return -ENOMEM;
                }
                nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
                              rbdr_idx, new_rb);
 next_rbdr:
        /* Re-enable RBDR interrupts only if buffer allocation succeeds */
-       if (!nic->rb_alloc_fail && rbdr->enable)
+       if (!nic->rb_alloc_fail && rbdr->enable &&
+           netif_running(nic->pnicvf->netdev))
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
 
        if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
 
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
+       struct sk_buff *skb;
+
        if (!sq)
                return;
        if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
                                  sq->dmem.q_len * TSO_HEADER_SIZE,
                                  sq->tso_hdrs, sq->tso_hdrs_phys);
 
+       /* Free pending skbs in the queue */
+       smp_rmb();
+       while (sq->head != sq->tail) {
+               skb = (struct sk_buff *)sq->skbuff[sq->head];
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               sq->head++;
+               sq->head &= (sq->dmem.q_len - 1);
+       }
        kfree(sq->skbuff);
        nicvf_free_q_desc_mem(nic, &sq->dmem);
 }
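
The head walk above relies on the send-queue length being a power of two, so masking with (q_len - 1) is a cheap modulo. A quick runnable check of the wrap:

#include <assert.h>

int main(void)
{
	unsigned int q_len = 1024, head = 1023;	/* hypothetical ring size */

	head = (head + 1) & (q_len - 1);
	assert(head == 0);	/* stepped past the last slot and wrapped to 0 */
	return 0;
}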
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
 {
        union nic_mbx mbx = {};
 
-       /* Reset all RXQ's stats */
+       /* Reset all RQ/SQ and VF stats */
        mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+       mbx.reset_stat.rx_stat_mask = 0x3FFF;
+       mbx.reset_stat.tx_stat_mask = 0x1F;
        mbx.reset_stat.rq_stat_mask = 0xFFFF;
+       mbx.reset_stat.sq_stat_mask = 0xFFFF;
        nicvf_send_msg_to_pf(nic, &mbx);
 }
 
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
        mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
        nicvf_send_msg_to_pf(nic, &mbx);
 
-       nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
-       if (!nic->sqs_mode)
+       if (!nic->sqs_mode && (qidx == 0)) {
+               /* Enable checking L3/L4 length and TCP/UDP checksums */
+               nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+                                     (BIT(24) | BIT(23) | BIT(21)));
                nicvf_config_vlan_stripping(nic, nic->netdev->features);
+       }
 
        /* Enable Receive queue */
        memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
                /* For non-tunneled pkts, point this to L2 ethertype */
                hdr->inner_l3_offset = skb_network_offset(skb) - 2;
-               nic->drv_stats.tx_tso++;
+               this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
        }
 }
 
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 
        nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
 
-       nic->drv_stats.tx_tso++;
+       this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
        return 1;
 }
 
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 /* Check for errors in the receive cmp.queue entry */
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
-       struct nicvf_hw_stats *stats = &nic->hw_stats;
-
        if (!cqe_rx->err_level && !cqe_rx->err_opcode)
                return 0;
 
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 
        switch (cqe_rx->err_opcode) {
        case CQ_RX_ERROP_RE_PARTIAL:
-               stats->rx_bgx_truncated_pkts++;
+               this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
                break;
        case CQ_RX_ERROP_RE_JABBER:
-               stats->rx_jabber_errs++;
+               this_cpu_inc(nic->drv_stats->rx_jabber_errs);
                break;
        case CQ_RX_ERROP_RE_FCS:
-               stats->rx_fcs_errs++;
+               this_cpu_inc(nic->drv_stats->rx_fcs_errs);
                break;
        case CQ_RX_ERROP_RE_RX_CTL:
-               stats->rx_bgx_errs++;
+               this_cpu_inc(nic->drv_stats->rx_bgx_errs);
                break;
        case CQ_RX_ERROP_PREL2_ERR:
-               stats->rx_prel2_errs++;
+               this_cpu_inc(nic->drv_stats->rx_prel2_errs);
                break;
        case CQ_RX_ERROP_L2_MAL:
-               stats->rx_l2_hdr_malformed++;
+               this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
                break;
        case CQ_RX_ERROP_L2_OVERSIZE:
-               stats->rx_oversize++;
+               this_cpu_inc(nic->drv_stats->rx_oversize);
                break;
        case CQ_RX_ERROP_L2_UNDERSIZE:
-               stats->rx_undersize++;
+               this_cpu_inc(nic->drv_stats->rx_undersize);
                break;
        case CQ_RX_ERROP_L2_LENMISM:
-               stats->rx_l2_len_mismatch++;
+               this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
                break;
        case CQ_RX_ERROP_L2_PCLP:
-               stats->rx_l2_pclp++;
+               this_cpu_inc(nic->drv_stats->rx_l2_pclp);
                break;
        case CQ_RX_ERROP_IP_NOT:
-               stats->rx_ip_ver_errs++;
+               this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
                break;
        case CQ_RX_ERROP_IP_CSUM_ERR:
-               stats->rx_ip_csum_errs++;
+               this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
                break;
        case CQ_RX_ERROP_IP_MAL:
-               stats->rx_ip_hdr_malformed++;
+               this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
                break;
        case CQ_RX_ERROP_IP_MALD:
-               stats->rx_ip_payload_malformed++;
+               this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
                break;
        case CQ_RX_ERROP_IP_HOP:
-               stats->rx_ip_ttl_errs++;
+               this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
                break;
        case CQ_RX_ERROP_L3_PCLP:
-               stats->rx_l3_pclp++;
+               this_cpu_inc(nic->drv_stats->rx_l3_pclp);
                break;
        case CQ_RX_ERROP_L4_MAL:
-               stats->rx_l4_malformed++;
+               this_cpu_inc(nic->drv_stats->rx_l4_malformed);
                break;
        case CQ_RX_ERROP_L4_CHK:
-               stats->rx_l4_csum_errs++;
+               this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
                break;
        case CQ_RX_ERROP_UDP_LEN:
-               stats->rx_udp_len_errs++;
+               this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
                break;
        case CQ_RX_ERROP_L4_PORT:
-               stats->rx_l4_port_errs++;
+               this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
                break;
        case CQ_RX_ERROP_TCP_FLAG:
-               stats->rx_tcp_flag_errs++;
+               this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
                break;
        case CQ_RX_ERROP_TCP_OFFSET:
-               stats->rx_tcp_offset_errs++;
+               this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
                break;
        case CQ_RX_ERROP_L4_PCLP:
-               stats->rx_l4_pclp++;
+               this_cpu_inc(nic->drv_stats->rx_l4_pclp);
                break;
        case CQ_RX_ERROP_RBDR_TRUNC:
-               stats->rx_truncated_pkts++;
+               this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
                break;
        }
 
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 }
 
 /* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-                           struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
-       struct cmp_queue_stats *stats = &cq->stats;
-
        switch (cqe_tx->send_status) {
        case CQ_TX_ERROP_GOOD:
-               stats->tx.good++;
                return 0;
        case CQ_TX_ERROP_DESC_FAULT:
-               stats->tx.desc_fault++;
+               this_cpu_inc(nic->drv_stats->tx_desc_fault);
                break;
        case CQ_TX_ERROP_HDR_CONS_ERR:
-               stats->tx.hdr_cons_err++;
+               this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
                break;
        case CQ_TX_ERROP_SUBDC_ERR:
-               stats->tx.subdesc_err++;
+               this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+               break;
+       case CQ_TX_ERROP_MAX_SIZE_VIOL:
+               this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
                break;
        case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-               stats->tx.imm_size_oflow++;
+               this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
                break;
        case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-               stats->tx.data_seq_err++;
+               this_cpu_inc(nic->drv_stats->tx_data_seq_err);
                break;
        case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-               stats->tx.mem_seq_err++;
+               this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
                break;
        case CQ_TX_ERROP_LOCK_VIOL:
-               stats->tx.lock_viol++;
+               this_cpu_inc(nic->drv_stats->tx_lock_viol);
                break;
        case CQ_TX_ERROP_DATA_FAULT:
-               stats->tx.data_fault++;
+               this_cpu_inc(nic->drv_stats->tx_data_fault);
                break;
        case CQ_TX_ERROP_TSTMP_CONFLICT:
-               stats->tx.tstmp_conflict++;
+               this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
                break;
        case CQ_TX_ERROP_TSTMP_TIMEOUT:
-               stats->tx.tstmp_timeout++;
+               this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
                break;
        case CQ_TX_ERROP_MEM_FAULT:
-               stats->tx.mem_fault++;
+               this_cpu_inc(nic->drv_stats->tx_mem_fault);
                break;
        case CQ_TX_ERROP_CK_OVERLAP:
-               stats->tx.csum_overlap++;
+               this_cpu_inc(nic->drv_stats->tx_csum_overlap);
                break;
        case CQ_TX_ERROP_CK_OFLOW:
-               stats->tx.csum_overflow++;
+               this_cpu_inc(nic->drv_stats->tx_csum_overflow);
                break;
        }
 
index 869f3386028b1e765c4860cf86560f1f6b564b42..2e3c940c10933049b81c590d1f0ff72bb79a13f0 100644 (file)
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
        CQ_TX_ERROP_DESC_FAULT = 0x10,
        CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
        CQ_TX_ERROP_SUBDC_ERR = 0x12,
+       CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
        CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
        CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
        CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
        CQ_TX_ERROP_ENUM_LAST = 0x8a,
 };
 
-struct cmp_queue_stats {
-       struct tx_stats {
-               u64 good;
-               u64 desc_fault;
-               u64 hdr_cons_err;
-               u64 subdesc_err;
-               u64 imm_size_oflow;
-               u64 data_seq_err;
-               u64 mem_seq_err;
-               u64 lock_viol;
-               u64 data_fault;
-               u64 tstmp_conflict;
-               u64 tstmp_timeout;
-               u64 mem_fault;
-               u64 csum_overlap;
-               u64 csum_overflow;
-       } tx;
-} ____cacheline_aligned_in_smp;
-
 enum RQ_SQ_STATS {
        RQ_SQ_STATS_OCTS,
        RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
        spinlock_t      lock;  /* lock to serialize processing CQEs */
        void            *desc;
        struct q_desc_mem   dmem;
-       struct cmp_queue_stats  stats;
        int             irq;
 } ____cacheline_aligned_in_smp;
 
@@ -336,6 +317,5 @@ u64  nicvf_queue_reg_read(struct nicvf *nic,
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-                           struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */
index 8bbaedbb7b946353470f4bb57138f542025b068b..050e21fbb1471d5fa86f735386d90491660cce43 100644 (file)
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
        if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
-               bgx->bgx_id =
-                   (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+               bgx->bgx_id = (pci_resource_start(pdev,
+                       PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
                bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
                bgx->max_lmac = MAX_LMAC_PER_BGX;
                bgx_vnic[bgx->bgx_id] = bgx;
index d59c71e4a0008bb576c1c3a6e192eb225381c82a..01cc7c8591313fca033a7e791bc57ccd9adb776b 100644 (file)
@@ -28,6 +28,8 @@
 #define    MAX_DMAC_PER_LMAC                   8
 #define    MAX_FRAME_SIZE                      9216
 
+#define           BGX_ID_MASK                          0x3
+
 #define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE   2
 
 /* Registers */
index 1e74fd6085df43b954f2343cd47ba1f8b592deaf..e19a0ca8e5ddb315ec5900c327bce9cab69ee96c 100644 (file)
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                   rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
-       napi_hash_del(&rq->napi);
        netif_napi_del(&rq->napi);
        rq->netdev = NULL;
        rq->cntxt_id = rq->abs_id = 0;
index cece8a08edca1de45c4c56f89704ddd8523d7a3f..93aa2939142a226928c2be5926f772a234cd1294 100644 (file)
@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                if (eqo->q.created) {
                        be_eq_clean(eqo);
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
-                       napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
                        free_cpumask_var(eqo->affinity_mask);
                }
index f05ea56dcff2cdecee79e21ac38c1c9623378912..941c8e2c944e11d349937c89cc143eb94cb78fc7 100644 (file)
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
 
 static void sky2_shutdown(struct pci_dev *pdev)
 {
+       struct sky2_hw *hw = pci_get_drvdata(pdev);
+       int port;
+
+       for (port = 0; port < hw->ports; port++) {
+               struct net_device *ndev = hw->dev[port];
+
+               rtnl_lock();
+               if (netif_running(ndev)) {
+                       dev_close(ndev);
+                       netif_device_detach(ndev);
+               }
+               rtnl_unlock();
+       }
        sky2_suspend(&pdev->dev);
        pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
        pci_set_power_state(pdev, PCI_D3hot);
index 3818c5e06ebac5099f8051c813222349a2f6a6a5..4b78168a5f3ce8cd8b971c3e88c5addf1f46a7e3 100644 (file)
@@ -107,7 +107,7 @@ config DWMAC_STI
 config DWMAC_STM32
        tristate "STM32 DWMAC support"
        default ARCH_STM32
-       depends on OF && HAS_IOMEM
+       depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
        select MFD_SYSCON
        ---help---
          Support for ethernet controller on STM32 SOCs.
index 2920e2ee38647095afa97653c3d4dd9901f77d23..489ef146201e61c629c17010f672a621642e94b3 100644 (file)
@@ -63,8 +63,8 @@
 #define TSE_PCS_SGMII_LINK_TIMER_0                     0x0D40
 #define TSE_PCS_SGMII_LINK_TIMER_1                     0x0003
 #define TSE_PCS_SW_RESET_TIMEOUT                       100
-#define TSE_PCS_USE_SGMII_AN_MASK                      BIT(2)
-#define TSE_PCS_USE_SGMII_ENA                          BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK                      BIT(1)
+#define TSE_PCS_USE_SGMII_ENA                          BIT(0)
 
 #define SGMII_ADAPTER_CTRL_REG                         0x00
 #define SGMII_ADAPTER_DISABLE                          0x0001
index d3292c4a6eda3f6f9f17fecfc18e86880ebc4d61..6d2de4e01f6d00404bde51f3816ba2271adff865 100644 (file)
@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
        unsigned long ip_csum_bypassed;
        unsigned long ipv4_pkt_rcvd;
        unsigned long ipv6_pkt_rcvd;
-       unsigned long rx_msg_type_ext_no_ptp;
-       unsigned long rx_msg_type_sync;
-       unsigned long rx_msg_type_follow_up;
-       unsigned long rx_msg_type_delay_req;
-       unsigned long rx_msg_type_delay_resp;
-       unsigned long rx_msg_type_pdelay_req;
-       unsigned long rx_msg_type_pdelay_resp;
-       unsigned long rx_msg_type_pdelay_follow_up;
+       unsigned long no_ptp_rx_msg_type_ext;
+       unsigned long ptp_rx_msg_type_sync;
+       unsigned long ptp_rx_msg_type_follow_up;
+       unsigned long ptp_rx_msg_type_delay_req;
+       unsigned long ptp_rx_msg_type_delay_resp;
+       unsigned long ptp_rx_msg_type_pdelay_req;
+       unsigned long ptp_rx_msg_type_pdelay_resp;
+       unsigned long ptp_rx_msg_type_pdelay_follow_up;
+       unsigned long ptp_rx_msg_type_announce;
+       unsigned long ptp_rx_msg_type_management;
+       unsigned long ptp_rx_msg_pkt_reserved_type;
        unsigned long ptp_frame_type;
        unsigned long ptp_ver;
        unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
 /* PTP and HW Timer helpers */
 struct stmmac_hwtimestamp {
        void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
-       u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+       u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+                                          int gmac4);
        int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
        int (*config_addend) (void __iomem *ioaddr, u32 addend);
        int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
-                              int add_sub);
+                              int add_sub, int gmac4);
         u64(*get_systime) (void __iomem *ioaddr);
 };
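
With the extra gmac4 argument, callers in the stmmac core presumably forward the platform capability alongside the PTP clock rate; a caller-side sketch, using the priv->ptpaddr base added later in this series:

	sec_inc = priv->hw->ptp->config_sub_second_increment(priv->ptpaddr,
							     priv->plat->clk_ptp_rate,
							     priv->plat->has_gmac4);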
 
index 2e4c171a2b4146f6276855ab14a3ba83e85680ad..e3c86d42210953c5f4f985144a3c6eddfe6f4c73 100644 (file)
 #define        ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
 
 /* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP                        0
-#define RDES_EXT_SYNC                  1
-#define RDES_EXT_FOLLOW_UP             2
-#define RDES_EXT_DELAY_REQ             3
-#define RDES_EXT_DELAY_RESP            4
-#define RDES_EXT_PDELAY_REQ            5
-#define RDES_EXT_PDELAY_RESP           6
-#define RDES_EXT_PDELAY_FOLLOW_UP      7
+#define RDES_EXT_NO_PTP                        0x0
+#define RDES_EXT_SYNC                  0x1
+#define RDES_EXT_FOLLOW_UP             0x2
+#define RDES_EXT_DELAY_REQ             0x3
+#define RDES_EXT_DELAY_RESP            0x4
+#define RDES_EXT_PDELAY_REQ            0x5
+#define RDES_EXT_PDELAY_RESP           0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP      0x7
+#define RDES_PTP_ANNOUNCE              0x8
+#define RDES_PTP_MANAGEMENT            0x9
+#define RDES_PTP_SIGNALING             0xa
+#define RDES_PTP_PKT_RESERVED_TYPE     0xf
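
These values are the MAC's own encoding of the received PTP message type (note they are not the raw IEEE 1588 messageType field values). A tiny decoder over the table above, as a hypothetical helper:

static const char *ptp_msg_name(unsigned int t)
{
	switch (t) {
	case 0x0: return "not PTP";
	case 0x1: return "Sync";
	case 0x2: return "Follow_Up";
	case 0x3: return "Delay_Req";
	case 0x4: return "Delay_Resp";
	case 0x5: return "Pdelay_Req";
	case 0x6: return "Pdelay_Resp";
	case 0x7: return "Pdelay_Resp_Follow_Up";
	case 0x8: return "Announce";
	case 0x9: return "Management";
	case 0xa: return "Signaling";
	case 0xf: return "reserved";
	default:  return "unknown";
	}
}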
 
 /* Basic descriptor structure for normal and alternate descriptors */
 struct dma_desc {
index a1b17cd7886b1e1d316f8154948e682d7a8eb8e8..a601f8d43b75d8a97cc007ebaad4eed9ebd66ae3 100644 (file)
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
                x->ipv4_pkt_rcvd++;
        if (rdes1 & RDES1_IPV6_HEADER)
                x->ipv6_pkt_rcvd++;
-       if (message_type == RDES_EXT_SYNC)
-               x->rx_msg_type_sync++;
+
+       if (message_type == RDES_EXT_NO_PTP)
+               x->no_ptp_rx_msg_type_ext++;
+       else if (message_type == RDES_EXT_SYNC)
+               x->ptp_rx_msg_type_sync++;
        else if (message_type == RDES_EXT_FOLLOW_UP)
-               x->rx_msg_type_follow_up++;
+               x->ptp_rx_msg_type_follow_up++;
        else if (message_type == RDES_EXT_DELAY_REQ)
-               x->rx_msg_type_delay_req++;
+               x->ptp_rx_msg_type_delay_req++;
        else if (message_type == RDES_EXT_DELAY_RESP)
-               x->rx_msg_type_delay_resp++;
+               x->ptp_rx_msg_type_delay_resp++;
        else if (message_type == RDES_EXT_PDELAY_REQ)
-               x->rx_msg_type_pdelay_req++;
+               x->ptp_rx_msg_type_pdelay_req++;
        else if (message_type == RDES_EXT_PDELAY_RESP)
-               x->rx_msg_type_pdelay_resp++;
+               x->ptp_rx_msg_type_pdelay_resp++;
        else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-               x->rx_msg_type_pdelay_follow_up++;
-       else
-               x->rx_msg_type_ext_no_ptp++;
+               x->ptp_rx_msg_type_pdelay_follow_up++;
+       else if (message_type == RDES_PTP_ANNOUNCE)
+               x->ptp_rx_msg_type_announce++;
+       else if (message_type == RDES_PTP_MANAGEMENT)
+               x->ptp_rx_msg_type_management++;
+       else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+               x->ptp_rx_msg_pkt_reserved_type++;
 
        if (rdes1 & RDES1_PTP_PACKET_TYPE)
                x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
 
 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
 {
-       return (p->des3 & TDES3_TIMESTAMP_STATUS)
-               >> TDES3_TIMESTAMP_STATUS_SHIFT;
+       /* Context type from W/B descriptor must be zero */
+       if (p->des3 & TDES3_CONTEXT_TYPE)
+               return -EINVAL;
+
+       /* Tx Timestamp Status is 1, so des0 and des1 will have valid values */
+       if (p->des3 & TDES3_TIMESTAMP_STATUS)
+               return 0;
+
+       return 1;
 }
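
Note the convention this establishes: 0 means des0/des1 hold a valid capture, nonzero means skip the read. The consumer side is presumably gated like this sketch (not shown in this hunk):

	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		u64 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
		/* ... convert ns and report it via skb_tstamp_tx() ... */
	}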
 
-/*  NOTE: For RX CTX bit has to be checked before
- *  HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
        u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+       struct dma_desc *p = (struct dma_desc *)desc;
+       u32 own, ctxt;
+       int ret = 1;
+
+       own = p->des3 & RDES3_OWN;
+       ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+               >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+       if (likely(!own && ctxt)) {
+               if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+                       /* Corrupted value */
+                       ret = -EINVAL;
+               else
+                       /* A valid Timestamp is ready to be read */
+                       ret = 0;
+       }
+
+       /* Timestamp not ready */
+       return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
+       int ret = -EINVAL;
+
+       /* Get the status from normal w/b descriptor */
+       if (likely(p->des3 & TDES3_RS1V)) {
+               if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+                       int i = 0;
+
+                       /* Check if timestamp is OK from context descriptor */
+                       do {
+                               ret = dwmac4_rx_check_timestamp(desc);
+                               if (ret < 0)
+                                       goto exit;
+                               i++;
 
-       return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
-               >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+                       } while ((ret == 1) && (i < 10));
+
+                       if (i == 10)
+                               ret = -EBUSY;
+               }
+       }
+exit:
+       return ret;
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
        .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
        .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
        .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
-       .get_timestamp = dwmac4_wrback_get_timestamp,
-       .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+       .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+       .get_timestamp = dwmac4_get_timestamp,
        .set_tx_ic = dwmac4_rd_set_tx_ic,
        .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
        .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
index 0902a2edeaa9414cedd370f196d6bddab0c11c3a..9736c505211add1db476c3426c701a04961f4ed4 100644 (file)
 #define TDES3_CTXT_TCMSSV              BIT(26)
 
 /* TDES3 Common */
+#define        TDES3_RS1V                      BIT(26)
+#define        TDES3_RS1V_SHIFT                26
 #define TDES3_LAST_DESCRIPTOR          BIT(28)
 #define TDES3_LAST_DESCRIPTOR_SHIFT    28
 #define TDES3_FIRST_DESCRIPTOR         BIT(29)
 #define TDES3_CONTEXT_TYPE             BIT(30)
+#define        TDES3_CONTEXT_TYPE_SHIFT        30
 
 /* TDS3 use for both format (read and write back) */
 #define TDES3_OWN                      BIT(31)
 #define RDES3_LAST_DESCRIPTOR          BIT(28)
 #define RDES3_FIRST_DESCRIPTOR         BIT(29)
 #define RDES3_CONTEXT_DESCRIPTOR       BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
 
 /* RDES3 (read format) */
 #define RDES3_BUFFER1_VALID_ADDR       BIT(24)
index 38f19c99cf59e7382cc6a72d15deb2a50f1c4b96..e75549327c3451ad5aba452d20769fd31e403c2b 100644 (file)
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                        x->ipv4_pkt_rcvd++;
                if (rdes4 & ERDES4_IPV6_PKT_RCVD)
                        x->ipv6_pkt_rcvd++;
-               if (message_type == RDES_EXT_SYNC)
-                       x->rx_msg_type_sync++;
+
+               if (message_type == RDES_EXT_NO_PTP)
+                       x->no_ptp_rx_msg_type_ext++;
+               else if (message_type == RDES_EXT_SYNC)
+                       x->ptp_rx_msg_type_sync++;
                else if (message_type == RDES_EXT_FOLLOW_UP)
-                       x->rx_msg_type_follow_up++;
+                       x->ptp_rx_msg_type_follow_up++;
                else if (message_type == RDES_EXT_DELAY_REQ)
-                       x->rx_msg_type_delay_req++;
+                       x->ptp_rx_msg_type_delay_req++;
                else if (message_type == RDES_EXT_DELAY_RESP)
-                       x->rx_msg_type_delay_resp++;
+                       x->ptp_rx_msg_type_delay_resp++;
                else if (message_type == RDES_EXT_PDELAY_REQ)
-                       x->rx_msg_type_pdelay_req++;
+                       x->ptp_rx_msg_type_pdelay_req++;
                else if (message_type == RDES_EXT_PDELAY_RESP)
-                       x->rx_msg_type_pdelay_resp++;
+                       x->ptp_rx_msg_type_pdelay_resp++;
                else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-                       x->rx_msg_type_pdelay_follow_up++;
-               else
-                       x->rx_msg_type_ext_no_ptp++;
+                       x->ptp_rx_msg_type_pdelay_follow_up++;
+               else if (message_type == RDES_PTP_ANNOUNCE)
+                       x->ptp_rx_msg_type_announce++;
+               else if (message_type == RDES_PTP_MANAGEMENT)
+                       x->ptp_rx_msg_type_management++;
+               else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+                       x->ptp_rx_msg_pkt_reserved_type++;
+
                if (rdes4 & ERDES4_PTP_FRAME_TYPE)
                        x->ptp_frame_type++;
                if (rdes4 & ERDES4_PTP_VER)
index b15fc55f1b96ae10ad5333f4a5c2f7fe669dd234..4d2a759b84652165d8e70e4284af744f1edb3e75 100644 (file)
@@ -129,6 +129,7 @@ struct stmmac_priv {
        int irq_wake;
        spinlock_t ptp_lock;
        void __iomem *mmcaddr;
+       void __iomem *ptpaddr;
        u32 rx_tail_addr;
        u32 tx_tail_addr;
        u32 mss;
index 1e06173fc9d733d63c1e5ccbbc12c03506d0a44c..c5d0142adda2e00e958b0a851acc506769f8cdb5 100644 (file)
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
        STMMAC_STAT(ip_csum_bypassed),
        STMMAC_STAT(ipv4_pkt_rcvd),
        STMMAC_STAT(ipv6_pkt_rcvd),
-       STMMAC_STAT(rx_msg_type_ext_no_ptp),
-       STMMAC_STAT(rx_msg_type_sync),
-       STMMAC_STAT(rx_msg_type_follow_up),
-       STMMAC_STAT(rx_msg_type_delay_req),
-       STMMAC_STAT(rx_msg_type_delay_resp),
-       STMMAC_STAT(rx_msg_type_pdelay_req),
-       STMMAC_STAT(rx_msg_type_pdelay_resp),
-       STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+       STMMAC_STAT(no_ptp_rx_msg_type_ext),
+       STMMAC_STAT(ptp_rx_msg_type_sync),
+       STMMAC_STAT(ptp_rx_msg_type_follow_up),
+       STMMAC_STAT(ptp_rx_msg_type_delay_req),
+       STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+       STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+       STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+       STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+       STMMAC_STAT(ptp_rx_msg_type_announce),
+       STMMAC_STAT(ptp_rx_msg_type_management),
+       STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
        STMMAC_STAT(ptp_frame_type),
        STMMAC_STAT(ptp_ver),
        STMMAC_STAT(timestamp_dropped),
index a77f68918010d3a511c4a97cd3b2c83671f9f542..10d6059b2f26555af9963812f847b68109b9c959 100644 (file)
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
 }
 
 static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
-                                             u32 ptp_clock)
+                                             u32 ptp_clock, int gmac4)
 {
        u32 value = readl(ioaddr + PTP_TCR);
        unsigned long data;
 
-       /* Convert the ptp_clock to nano second
-        * formula = (2/ptp_clock) * 1000000000
-        * where, ptp_clock = 50MHz.
+       /* For GMAC3.x and GMAC4.x cores, convert ptp_clock to a nanosecond
+        * period: formula = (1/ptp_clock) * 1000000000
+        * where ptp_clock is 50MHz when the fine update method is used
         */
-       data = (2000000000ULL / ptp_clock);
+       if (value & PTP_TCR_TSCFUPDT)
+               data = (1000000000ULL / 50000000);
+       else
+               data = (1000000000ULL / ptp_clock);
 
        /* 0.465ns accuracy */
        if (!(value & PTP_TCR_TSCTRLSSR))
                data = (data * 1000) / 465;
 
+       data &= PTP_SSIR_SSINC_MASK;
+
+       if (gmac4)
+               data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
        writel(data, ioaddr + PTP_SSIR);
 
        return data;
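
A worked example of the value programmed above, assuming the fine update method (PTP_TCR_TSCFUPDT set), digital rollover (PTP_TCR_TSCTRLSSR set) and a GMAC4 core; every number follows from the code:

        data = 1000000000ULL / 50000000;        /* 20ns period of the 50MHz ref */
        /* digital rollover: already in 1ns units, no 0.465ns rescaling */
        data &= PTP_SSIR_SSINC_MASK;            /* 0x14 */
        data <<= GMAC4_PTP_SSIR_SSINC_SHIFT;    /* 0x00140000: SSINC field */
        writel(data, ioaddr + PTP_SSIR);
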
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
 }
 
 static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
-                                int add_sub)
+                                int add_sub, int gmac4)
 {
        u32 value;
        int limit;
 
+       if (add_sub) {
+               /* If the new sec value needs to be subtracted from
+                * the system time, then the MAC_STSUR reg should be
+                * programmed with (2^32 - <new_sec_value>)
+                */
+               if (gmac4)
+                       sec = (0x100000000ULL - sec);
+
+               value = readl(ioaddr + PTP_TCR);
+               if (value & PTP_TCR_TSCTRLSSR)
+                       nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+               else
+                       nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+       }
+
        writel(sec, ioaddr + PTP_STSUR);
-       writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
-               ioaddr + PTP_STNSUR);
+       value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+       writel(value, ioaddr + PTP_STNSUR);
+
        /* issue command to initialize the system time value */
        value = readl(ioaddr + PTP_TCR);
        value |= PTP_TCR_TSUPDT;
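
A worked example for the subtract path, assuming GMAC4 with digital rollover and a requested adjustment of -1.5s, which the caller splits into sec = 1, nsec = 500000000 and add_sub = 1; the register values follow from the code, and the MAC is expected to resolve the borrow between the two fields itself (databook behaviour, not visible in this patch):

        sec  = 0x100000000ULL - 1;                      /* 0xffffffff */
        nsec = PTP_DIGITAL_ROLLOVER_MODE - 500000000;   /* 500000000 */
        writel(sec, ioaddr + PTP_STSUR);
        writel((1U << PTP_STNSUR_ADDSUB_SHIFT) | nsec, ioaddr + PTP_STNSUR);
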
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
 {
        u64 ns;
 
+       /* Get the TSSS value */
        ns = readl(ioaddr + PTP_STNSR);
-       /* convert sec time value to nanosecond */
+       /* Get the TSS and convert sec time value to nanosecond */
        ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
 
        return ns;
index e2c94ec4edd03b4295e5c03d326b7c2e09975c90..1f9ec02fa7f856ecf461cc4c5bfa51e7cc79902e 100644 (file)
@@ -340,18 +340,17 @@ out:
 
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
  * @skb : the socket buffer
  * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack, and also perform some sanity checks.
  */
 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
-                                  unsigned int entry, struct sk_buff *skb)
+                                  struct dma_desc *p, struct sk_buff *skb)
 {
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;
-       void *desc = NULL;
 
        if (!priv->hwts_tx_en)
                return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;
 
-       if (priv->adv_ts)
-               desc = (priv->dma_etx + entry);
-       else
-               desc = (priv->dma_tx + entry);
-
        /* check tx tstamp status */
-       if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
-               return;
+       if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+               /* get the valid tstamp */
+               ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-       /* get the valid tstamp */
-       ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+               memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+               shhwtstamp.hwtstamp = ns_to_ktime(ns);
 
-       memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-       shhwtstamp.hwtstamp = ns_to_ktime(ns);
-       /* pass tstamp to stack */
-       skb_tstamp_tx(skb, &shhwtstamp);
+               netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+               /* pass tstamp to stack */
+               skb_tstamp_tx(skb, &shhwtstamp);
+       }
 
        return;
 }
 
 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
  * @skb : the socket buffer
  * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
  */
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
-                                  unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+                                  struct dma_desc *np, struct sk_buff *skb)
 {
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;
-       void *desc = NULL;
 
        if (!priv->hwts_rx_en)
                return;
 
-       if (priv->adv_ts)
-               desc = (priv->dma_erx + entry);
-       else
-               desc = (priv->dma_rx + entry);
-
-       /* exit if rx tstamp is not valid */
-       if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
-               return;
+       /* Check if timestamp is available */
+       if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+               /* For GMAC4, the valid timestamp is from CTX next desc. */
+               if (priv->plat->has_gmac4)
+                       ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+               else
+                       ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-       /* get valid tstamp */
-       ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
-       shhwtstamp = skb_hwtstamps(skb);
-       memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-       shhwtstamp->hwtstamp = ns_to_ktime(ns);
+               netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+               shhwtstamp = skb_hwtstamps(skb);
+               memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+               shhwtstamp->hwtstamp = ns_to_ktime(ns);
+       } else {
+               netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+       }
 }
 
 /**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-               priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+               priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
-               priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+               priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
 
                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
-                       priv->ioaddr, priv->clk_ptp_rate);
+                       priv->ptpaddr, priv->clk_ptp_rate,
+                       priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);
 
                /* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                 */
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
-               priv->hw->ptp->config_addend(priv->ioaddr,
+               priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);
 
                /* initialize system time */
                ktime_get_real_ts64(&now);
 
                /* lower 32 bits of tv_sec are safe until y2106 */
-               priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+               priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }
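
A worked example of the addend computed above, assuming fine update (sec_inc = 20ns, so temp = 50000000) and an illustrative 62.5MHz clk_ptp_rate:

        temp = div_u64(1000000000ULL, 20);      /* 50000000 ticks per second */
        temp = (u64)(temp << 32);               /* scale by 2^32 */
        addend = div_u64(temp, 62500000);       /* 0xcccccccc = 0.8 * 2^32 */

Each 62.5MHz cycle then advances the 32-bit accumulator by 0.8 of a wrap, i.e. one 20ns increment every 1.25 cycles, which comes to exactly 10^9 ns per second.
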
 
@@ -1340,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                                priv->dev->stats.tx_packets++;
                                priv->xstats.tx_pkt_n++;
                        }
-                       stmmac_get_tx_hwtstamp(priv, entry, skb);
+                       stmmac_get_tx_hwtstamp(priv, p, skb);
                }
 
                if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1486,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-       if (priv->synopsys_id >= DWMAC_CORE_4_00)
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+               priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
                priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
-       else
+       } else {
+               priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
                priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+       }
 
        dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
@@ -2484,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
        if (netif_msg_rx_status(priv)) {
                void *rx_head;
 
-               pr_debug("%s: descriptor ring:\n", __func__);
+               pr_info("%s: descriptor ring:\n", __func__);
                if (priv->extend_desc)
                        rx_head = (void *)priv->dma_erx;
                else
@@ -2495,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
        while (count < limit) {
                int status;
                struct dma_desc *p;
+               struct dma_desc *np;
 
                if (priv->extend_desc)
                        p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2514,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                next_entry = priv->cur_rx;
 
                if (priv->extend_desc)
-                       prefetch(priv->dma_erx + next_entry);
+                       np = (struct dma_desc *)(priv->dma_erx + next_entry);
                else
-                       prefetch(priv->dma_rx + next_entry);
+                       np = priv->dma_rx + next_entry;
+
+               prefetch(np);
 
                if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
                        priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2568,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                                frame_len -= ETH_FCS_LEN;
 
                        if (netif_msg_rx_status(priv)) {
-                               pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+                               pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
                                        p, entry, des);
                                if (frame_len > ETH_FRAME_LEN)
                                        pr_debug("\tframe size %d, COE: %d\n",
@@ -2625,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                                                 DMA_FROM_DEVICE);
                        }
 
-                       stmmac_get_rx_hwtstamp(priv, entry, skb);
-
                        if (netif_msg_pktdata(priv)) {
                                pr_debug("frame received (%dbytes)", frame_len);
                                print_pkt(skb->data, frame_len);
                        }
 
+                       stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
                        stmmac_rx_vlan(priv->dev, skb);
 
                        skb->protocol = eth_type_trans(skb, priv->dev);
index 1477471f8d4455ab27151c617a0c92a488648562..3eb281d1db08a94ff76a3d3d21df367966a036d6 100644 (file)
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
        spin_lock_irqsave(&priv->ptp_lock, flags);
 
-       priv->hw->ptp->config_addend(priv->ioaddr, addend);
+       priv->hw->ptp->config_addend(priv->ptpaddr, addend);
 
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
        spin_lock_irqsave(&priv->ptp_lock, flags);
 
-       priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+       priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+                                     priv->plat->has_gmac4);
 
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
        spin_lock_irqsave(&priv->ptp_lock, flags);
 
-       ns = priv->hw->ptp->get_systime(priv->ioaddr);
+       ns = priv->hw->ptp->get_systime(priv->ptpaddr);
 
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
 
        spin_lock_irqsave(&priv->ptp_lock, flags);
 
-       priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+       priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
 
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
index 4535df37c22767824d1f7bbe6db56a8e3d0644ab..c06938c47af5549658c19e9c64a568595d80510a 100644 (file)
   Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
 ******************************************************************************/
 
-#ifndef __STMMAC_PTP_H__
-#define __STMMAC_PTP_H__
+#ifndef        __STMMAC_PTP_H__
+#define        __STMMAC_PTP_H__
 
-/* IEEE 1588 PTP register offsets */
-#define PTP_TCR                0x0700  /* Timestamp Control Reg */
-#define PTP_SSIR       0x0704  /* Sub-Second Increment Reg */
-#define PTP_STSR       0x0708  /* System Time - Seconds Reg */
-#define PTP_STNSR      0x070C  /* System Time - Nanoseconds Reg */
-#define PTP_STSUR      0x0710  /* System Time - Seconds Update Reg */
-#define PTP_STNSUR     0x0714  /* System Time - Nanoseconds Update Reg */
-#define PTP_TAR                0x0718  /* Timestamp Addend Reg */
-#define PTP_TTSR       0x071C  /* Target Time Seconds Reg */
-#define PTP_TTNSR      0x0720  /* Target Time Nanoseconds Reg */
-#define        PTP_STHWSR      0x0724  /* System Time - Higher Word Seconds Reg */
-#define PTP_TSR                0x0728  /* Timestamp Status */
+#define        PTP_GMAC4_OFFSET        0xb00
+#define        PTP_GMAC3_X_OFFSET      0x700
 
-#define PTP_STNSUR_ADDSUB_SHIFT 31
+/* IEEE 1588 PTP register offsets */
+#define        PTP_TCR         0x00    /* Timestamp Control Reg */
+#define        PTP_SSIR        0x04    /* Sub-Second Increment Reg */
+#define        PTP_STSR        0x08    /* System Time - Seconds Reg */
+#define        PTP_STNSR       0x0c    /* System Time - Nanoseconds Reg */
+#define        PTP_STSUR       0x10    /* System Time - Seconds Update Reg */
+#define        PTP_STNSUR      0x14    /* System Time - Nanoseconds Update Reg */
+#define        PTP_TAR         0x18    /* Timestamp Addend Reg */
 
-/* PTP TCR defines */
-#define PTP_TCR_TSENA          0x00000001 /* Timestamp Enable */
-#define PTP_TCR_TSCFUPDT       0x00000002 /* Timestamp Fine/Coarse Update */
-#define PTP_TCR_TSINIT         0x00000004 /* Timestamp Initialize */
-#define PTP_TCR_TSUPDT         0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define PTP_TCR_TSTRIG         0x00000010
-#define PTP_TCR_TSADDREG       0x00000020 /* Addend Reg Update */
-#define PTP_TCR_TSENALL                0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define PTP_TCR_TSCTRLSSR      0x00000200
+#define        PTP_STNSUR_ADDSUB_SHIFT 31
+#define        PTP_DIGITAL_ROLLOVER_MODE       0x3B9ACA00      /* 10^9 ns */
+#define        PTP_BINARY_ROLLOVER_MODE        0x80000000      /* ~0.466 ns */
 
+/* PTP Timestamp control register defines */
+#define        PTP_TCR_TSENA           BIT(0)  /* Timestamp Enable */
+#define        PTP_TCR_TSCFUPDT        BIT(1)  /* Timestamp Fine/Coarse Update */
+#define        PTP_TCR_TSINIT          BIT(2)  /* Timestamp Initialize */
+#define        PTP_TCR_TSUPDT          BIT(3)  /* Timestamp Update */
+#define        PTP_TCR_TSTRIG          BIT(4)  /* Timestamp Interrupt Trigger Enable */
+#define        PTP_TCR_TSADDREG        BIT(5)  /* Addend Reg Update */
+#define        PTP_TCR_TSENALL         BIT(8)  /* Enable Timestamp for All Frames */
+#define        PTP_TCR_TSCTRLSSR       BIT(9)  /* Digital or Binary Rollover Control */
 /* Enable PTP packet Processing for Version 2 Format */
-#define PTP_TCR_TSVER2ENA      0x00000400
+#define        PTP_TCR_TSVER2ENA       BIT(10)
 /* Enable Processing of PTP over Ethernet Frames */
-#define PTP_TCR_TSIPENA                0x00000800
+#define        PTP_TCR_TSIPENA         BIT(11)
 /* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define PTP_TCR_TSIPV6ENA      0x00001000
+#define        PTP_TCR_TSIPV6ENA       BIT(12)
 /* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define PTP_TCR_TSIPV4ENA      0x00002000
+#define        PTP_TCR_TSIPV4ENA       BIT(13)
 /* Enable Timestamp Snapshot for Event Messages */
-#define PTP_TCR_TSEVNTENA      0x00004000
+#define        PTP_TCR_TSEVNTENA       BIT(14)
 /* Enable Snapshot for Messages Relevant to Master */
-#define PTP_TCR_TSMSTRENA      0x00008000
+#define        PTP_TCR_TSMSTRENA       BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1   0x00010000
+#define        PTP_TCR_SNAPTYPSEL_1    GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
-#define PTP_TCR_TSENMACADDR    0x00040000
+#define        PTP_TCR_TSENMACADDR     BIT(18)
+
+/* SSIR defines */
+#define        PTP_SSIR_SSINC_MASK             0xff
+#define        GMAC4_PTP_SSIR_SSINC_SHIFT      16
 
-#endif /* __STMMAC_PTP_H__ */
+#endif /* __STMMAC_PTP_H__ */
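
The hex-to-BIT()/GENMASK() conversion above is mechanical except for PTP_TCR_SNAPTYPSEL_1, which grows from the single bit 0x00010000 to the full two-bit snapshot-type field at 17:16; a few compile-time checks make the expansions explicit (the wrapper function name is only illustrative):

        #include <linux/bitops.h>
        #include <linux/bug.h>

        static inline void stmmac_ptp_mask_sanity(void)
        {
                BUILD_BUG_ON(BIT(0)  != 0x00000001);    /* PTP_TCR_TSENA */
                BUILD_BUG_ON(BIT(9)  != 0x00000200);    /* PTP_TCR_TSCTRLSSR */
                BUILD_BUG_ON(BIT(18) != 0x00040000);    /* PTP_TCR_TSENMACADDR */
                /* was 0x00010000 (bit 16 only); now both field bits */
                BUILD_BUG_ON(GENMASK(17, 16) != 0x00030000);
        }
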
index aa4f9d2d8fa98f2b5c4a63710c24ad5387f5a73f..02f452730d52ae26fedc12240311f2467dfd942b 100644 (file)
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
        void __iomem *gregs        = bp->gregs;
        void __iomem *cregs        = bp->creg;
        void __iomem *bregs        = bp->bregs;
+       __u32 bblk_dvma = (__u32)bp->bblock_dvma;
        unsigned char *e = &bp->dev->dev_addr[0];
 
        /* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
                    bregs + BMAC_XIFCFG);
 
        /* Tell the QEC where the ring descriptors are. */
-       sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+       sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
                    cregs + CREG_RXDS);
-       sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+       sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
                    cregs + CREG_TXDS);
 
        /* Setup the FIFO pointers into QEC local memory. */
index 06dd21707353594b2150ec8afac12c440232160d..532fc56830cf319b3067b3caa6c8149f1ac115d6 100644 (file)
@@ -291,7 +291,7 @@ struct bigmac {
        void __iomem    *bregs; /* BigMAC Registers                   */
        void __iomem    *tregs; /* BigMAC Transceiver                 */
        struct bmac_init_block  *bmac_block;    /* RX and TX descriptors */
-       __u32                    bblock_dvma;   /* RX and TX descriptors */
+       dma_addr_t              bblock_dvma;    /* RX and TX descriptors */
 
        spinlock_t              lock;
 
index 9b825780b3be02829b54799b0e287ed2c8976a09..9582948145c172826d5321274b59ae7a653b1132 100644 (file)
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
 {
        struct qe_init_block *qb = qep->qe_block;
        struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 qbufs_dvma = qep->buffers_dvma;
+       __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
        int i;
 
        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
        void __iomem *mregs = qep->mregs;
        void __iomem *gregs = qecp->gregs;
        unsigned char *e = &qep->dev->dev_addr[0];
+       __u32 qblk_dvma = (__u32)qep->qblock_dvma;
        u32 tmp;
        int i;
 
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
                return -EAGAIN;
 
        /* Setup initial rx/tx init block pointers. */
-       sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
-       sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+       sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+       sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
 
        /* Enable/mask the various irq's. */
        sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
        struct net_device *dev = qep->dev;
        struct qe_rxd *this;
        struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 qbufs_dvma = qep->buffers_dvma;
+       __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
        int elem = qep->rx_new;
        u32 flags;
 
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct sunqe *qep = netdev_priv(dev);
        struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+       __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;
 
index 581781b6b2fac9d6bfb4bfae9735d674f6d46eec..ae190b77431b14b8f63b465155e9762727ccad77 100644 (file)
@@ -334,12 +334,12 @@ struct sunqe {
        void __iomem                    *qcregs;                /* QEC per-channel Registers   */
        void __iomem                    *mregs;         /* Per-channel MACE Registers  */
        struct qe_init_block            *qe_block;      /* RX and TX descriptors       */
-       __u32                           qblock_dvma;    /* RX and TX descriptors       */
+       dma_addr_t                      qblock_dvma;    /* RX and TX descriptors       */
        spinlock_t                      lock;           /* Protects txfull state       */
        int                             rx_new, rx_old; /* RX ring extents             */
        int                             tx_new, tx_old; /* TX ring extents             */
        struct sunqe_buffers            *buffers;       /* CPU visible address.        */
-       __u32                           buffers_dvma;   /* DVMA visible address.       */
+       dma_addr_t                      buffers_dvma;   /* DVMA visible address.       */
        struct sunqec                   *parent;
        u8                              mconfig;        /* Base MACE mconfig value     */
        struct platform_device          *op;            /* QE's OF device struct       */
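
The sunbmac/sunqe changes store the full dma_addr_t handed back by the DMA API (64-bit on sparc64) instead of truncating it at assignment time; the QEC/BigMAC ring registers are still 32-bit, so the value is narrowed explicitly, and only at the point where it reaches the hardware, assuming the SBus DVMA mappings for these devices fit the 32-bit window:

        dma_addr_t qblock_dvma;         /* keeps the untruncated DMA handle */

        __u32 qblk_dvma = (__u32)qep->qblock_dvma;      /* one visible narrowing */
        sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
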
index c6cff3d2ff050c33c65416a1c8e6c06c44358936..58947aae31c7ecba94205838b633d2972d68cee2 100644 (file)
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                         * to the PHY is the Ethernet MAC DT node.
                         */
                        ret = of_phy_register_fixed_link(slave_node);
-                       if (ret)
+                       if (ret) {
+                               if (ret != -EPROBE_DEFER)
+                                       dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
                                return ret;
+                       }
                        slave_data->phy_node = of_node_get(slave_node);
                } else if (parp) {
                        u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                        }
                        snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                                 PHY_ID_FMT, mdio->name, phyid);
+                       put_device(&mdio->dev);
                } else {
                        dev_err(&pdev->dev,
                                "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,46 @@ no_phy_slave:
        return 0;
 }
 
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+       struct cpsw_platform_data *data = &cpsw->data;
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *slave_node;
+       int i = 0;
+
+       for_each_available_child_of_node(node, slave_node) {
+               struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+               if (strcmp(slave_node->name, "slave"))
+                       continue;
+
+               if (of_phy_is_fixed_link(slave_node)) {
+                       struct phy_device *phydev;
+
+                       phydev = of_phy_find_device(slave_node);
+                       if (phydev) {
+                               fixed_phy_unregister(phydev);
+                               /* Put references taken by
+                                * of_phy_find_device() and
+                                * of_phy_register_fixed_link().
+                                */
+                               phy_device_free(phydev);
+                               phy_device_free(phydev);
+                       }
+               }
+
+               of_node_put(slave_data->phy_node);
+
+               i++;
+               if (i == data->slaves)
+                       break;
+       }
+
+       of_platform_depopulate(&pdev->dev);
+}
+
 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
 {
        struct cpsw_common              *cpsw = priv->cpsw;
@@ -2547,6 +2591,9 @@ static int cpsw_probe(struct platform_device *pdev)
        int irq;
 
        cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+       if (!cpsw)
+               return -ENOMEM;
+
        cpsw->dev = &pdev->dev;
 
        ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2631,19 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(&cpsw->data, pdev)) {
-               dev_err(&pdev->dev, "cpsw: platform data missing\n");
-               ret = -ENODEV;
+       /* Need to enable clocks with runtime PM api to access module
+        * registers
+        */
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0) {
+               pm_runtime_put_noidle(&pdev->dev);
                goto clean_runtime_disable_ret;
        }
+
+       ret = cpsw_probe_dt(&cpsw->data, pdev);
+       if (ret)
+               goto clean_dt_ret;
+
        data = &cpsw->data;
        cpsw->rx_ch_num = 1;
        cpsw->tx_ch_num = 1;
@@ -2608,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
                                    GFP_KERNEL);
        if (!cpsw->slaves) {
                ret = -ENOMEM;
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
        for (i = 0; i < data->slaves; i++)
                cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2675,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (IS_ERR(clk)) {
                dev_err(priv->dev, "fck is not found\n");
                ret = -ENODEV;
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
        cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
 
@@ -2628,26 +2683,17 @@ static int cpsw_probe(struct platform_device *pdev)
        ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
        if (IS_ERR(ss_regs)) {
                ret = PTR_ERR(ss_regs);
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
        cpsw->regs = ss_regs;
 
-       /* Need to enable clocks with runtime PM api to access module
-        * registers
-        */
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(&pdev->dev);
-               goto clean_runtime_disable_ret;
-       }
        cpsw->version = readl(&cpsw->regs->id_ver);
-       pm_runtime_put_sync(&pdev->dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(cpsw->wr_regs)) {
                ret = PTR_ERR(cpsw->wr_regs);
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
 
        memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2730,7 @@ static int cpsw_probe(struct platform_device *pdev)
        default:
                dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
                ret = -ENODEV;
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
        for (i = 0; i < cpsw->data.slaves; i++) {
                struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2759,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (!cpsw->dma) {
                dev_err(priv->dev, "error initializing dma\n");
                ret = -ENOMEM;
-               goto clean_runtime_disable_ret;
+               goto clean_dt_ret;
        }
 
        cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2857,23 @@ static int cpsw_probe(struct platform_device *pdev)
                ret = cpsw_probe_dual_emac(priv);
                if (ret) {
                        cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-                       goto clean_ale_ret;
+                       goto clean_unregister_netdev_ret;
                }
        }
 
+       pm_runtime_put(&pdev->dev);
+
        return 0;
 
+clean_unregister_netdev_ret:
+       unregister_netdev(ndev);
 clean_ale_ret:
        cpsw_ale_destroy(cpsw->ale);
 clean_dma_ret:
        cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+       cpsw_remove_dt(pdev);
+       pm_runtime_put_sync(&pdev->dev);
 clean_runtime_disable_ret:
        pm_runtime_disable(&pdev->dev);
 clean_ndev_ret:
@@ -2846,7 +2899,7 @@ static int cpsw_remove(struct platform_device *pdev)
 
        cpsw_ale_destroy(cpsw->ale);
        cpdma_ctlr_destroy(cpsw->dma);
-       of_platform_depopulate(&pdev->dev);
+       cpsw_remove_dt(pdev);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        if (cpsw->data.dual_emac)
index c649c101bbaba97ec44255b9ee1e7a878c26ada5..eb51672106811e35ee0a7b3f0578560e16bc3670 100644 (file)
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
 void fixed_phy_unregister(struct phy_device *phy)
 {
        phy_device_remove(phy);
-
+       of_node_put(phy->mdio.dev.of_node);
        fixed_phy_del(phy->mdio.addr);
 }
 EXPORT_SYMBOL_GPL(fixed_phy_unregister);
index 2e37eb337d4868715606b92dab65cfeb60a37679..24b4a09468dd0f5784d6bfd32814a73f6302a1e2 100644 (file)
 /* Vitesse Extended Page Access Register */
 #define MII_VSC82X4_EXT_PAGE_ACCESS    0x1f
 
+/* Vitesse VSC8601 Extended PHY Control Register 1 */
+#define MII_VSC8601_EPHY_CTL           0x17
+#define MII_VSC8601_EPHY_CTL_RGMII_SKEW        (1 << 8)
+
 #define PHY_ID_VSC8234                 0x000fc620
 #define PHY_ID_VSC8244                 0x000fc6c0
 #define PHY_ID_VSC8514                 0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
        return err;
 }
 
+/* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+ */
+static int vsc8601_add_skew(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
+       if (ret < 0)
+               return ret;
+
+       ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
+       return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
+}
+
+static int vsc8601_config_init(struct phy_device *phydev)
+{
+       int ret = 0;
+
+       if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+               ret = vsc8601_add_skew(phydev);
+
+       if (ret < 0)
+               return ret;
+
+       return genphy_config_init(phydev);
+}
+
 static int vsc824x_ack_interrupt(struct phy_device *phydev)
 {
        int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id_mask    = 0x000ffff0,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_init    = &genphy_config_init,
+       .config_init    = &vsc8601_config_init,
        .config_aneg    = &genphy_config_aneg,
        .read_status    = &genphy_read_status,
        .ack_interrupt  = &vsc824x_ack_interrupt,
index fd8b1e62301f825ba35a888f8caf2ecb004481df..7276d5a95bd0ee417a0051990dcb86824e2523d2 100644 (file)
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
                netif_napi_del(&vi->rq[i].napi);
        }
 
+       /* We called napi_hash_del() before netif_napi_del(),
+        * we need to respect an RCU grace period before freeing vi->rq
+        */
+       synchronize_net();
+
        kfree(vi->rq);
        kfree(vi->sq);
 }
index 431f13b4faf690c885fbd459b257de5d103406a8..d3bad577937622c868257892cf1658c561c3b947 100644 (file)
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
                data->bcn_delta = do_div(delta, bcn_int);
        } else {
                data->tsf_offset -= delta;
-               data->bcn_delta = -do_div(delta, bcn_int);
+               data->bcn_delta = -(s64)do_div(delta, bcn_int);
        }
 }
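
do_div() returns a u32 remainder, so negating it without a cast wraps in 32 bits and then zero-extends into the s64 field; a standalone C sketch of the promotion pitfall (plain stdint types standing in for the kernel ones):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t rem = 7;               /* stands in for the remainder */
                int64_t wrong = -rem;           /* wraps to 0xfffffff9 first */
                int64_t right = -(int64_t)rem;  /* widen, then negate */

                printf("wrong=%lld right=%lld\n",
                       (long long)wrong, (long long)right);
                /* prints: wrong=4294967289 right=-7 */
                return 0;
        }
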
 
index b470f7e3521d49c73877b6156bc9db7b926fdc51..5a3145a025470dc4c86f8d191a139a8acb85394f 100644 (file)
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
                mdiodev = to_mdio_device(d);
                if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
                        return to_phy_device(d);
+               put_device(d);
        }
 
        return NULL;
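
Both changes in this file plug reference leaks: the lookup loop must drop the device reference it took when the match turns out not to be a PHY, and of_phy_register_fixed_link() (next hunk) must drop its child-node reference on the error path. A sketch of the node-reference discipline, assuming the usual of_get_child_by_name() lookup that precedes the shown lines (function name illustrative):

        static int parse_fixed_link_speed(struct device_node *np, u32 *speed)
        {
                struct device_node *fl;
                int ret = 0;

                fl = of_get_child_by_name(np, "fixed-link");    /* +1 ref */
                if (!fl)
                        return -ENODEV;

                if (of_property_read_u32(fl, "speed", speed))
                        ret = -EINVAL;

                of_node_put(fl);        /* -1 ref on every exit path */
                return ret;
        }
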
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
                status.link = 1;
                status.duplex = of_property_read_bool(fixed_link_node,
                                                      "full-duplex");
-               if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+               if (of_property_read_u32(fixed_link_node, "speed",
+                                        &status.speed)) {
+                       of_node_put(fixed_link_node);
                        return -EINVAL;
+               }
                status.pause = of_property_read_bool(fixed_link_node, "pause");
                status.asym_pause = of_property_read_bool(fixed_link_node,
                                                          "asym-pause");
index 87e6334eab9309f85e8cb5308f96dec54de7f495..547ca7b3f09850ce0f0928b4fe0a26cbcaab14d2 100644 (file)
@@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
        dev_dbg(twl->dev, "%s\n", __func__);
-       pm_runtime_mark_last_busy(twl->dev);
-       pm_runtime_put_autosuspend(twl->dev);
 
        return 0;
 }
@@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy)
        dev_dbg(twl->dev, "%s\n", __func__);
        pm_runtime_get_sync(twl->dev);
        schedule_delayed_work(&twl->id_workaround_work, HZ);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
 
        return 0;
 }
index 8aa769a2d919d31f45619d32c4346a36753281d7..91b70bc46e7f184ece7d81ac233cb5957b080943 100644 (file)
@@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
            SAM_STAT_CHECK_CONDITION;
 }
 
-
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+       return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
 
 /**
  * scsih_qcmd - main scsi request entry point
@@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        if (ioc->logging_level & MPT_DEBUG_SCSI)
                scsi_print_command(scmd);
 
+       /*
+        * Lock the device for any subsequent command until this one
+        * completes.
+        */
+       if (ata_12_16_cmd(scmd))
+               scsi_internal_device_block(scmd->device);
+
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        if (scmd == NULL)
                return 1;
 
+       if (ata_12_16_cmd(scmd))
+               scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 
        if (mpi_reply == NULL) {
index 567fa080e261c693845a2755093f7b5042b68b9d..56d6142852a553ed9ad8011cb4c18a84e8656e0d 100644 (file)
@@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
-                               /* Get a reference to the sp and drop the lock.
-                                * The reference ensures this sp->done() call
-                                * - and not the call in qla2xxx_eh_abort() -
-                                * ends the SCSI command (with result 'res').
+                               /* Don't abort commands in the adapter during EEH
+                                * recovery as it is not accessible/responding.
                                 */
-                               sp_get(sp);
-                               spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                               qla2xxx_eh_abort(GET_CMD_SP(sp));
-                               spin_lock_irqsave(&ha->hardware_lock, flags);
+                               if (!ha->flags.eeh_busy) {
+                                       /* Get a reference to the sp and drop the lock.
+                                        * The reference ensures this sp->done() call
+                                        * - and not the call in qla2xxx_eh_abort() -
+                                        * ends the SCSI command (with result 'res').
+                                        */
+                                       sp_get(sp);
+                                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                                       qla2xxx_eh_abort(GET_CMD_SP(sp));
+                                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                               }
                                req->outstanding_cmds[cnt] = NULL;
                                sp->done(vha, sp, res);
                        }
index 7a223074df3d4338ede8ad0192ce12d0dcb0c1ce..afada655f86198366b88b0d379a4bb79c8095bbb 100644 (file)
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
        .set_cur_state = powerclamp_set_cur_state,
 };
 
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
 static int __init powerclamp_probe(void)
 {
-       if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+       if (!x86_match_cpu(intel_powerclamp_ids)) {
                pr_err("CPU does not support MWAIT");
                return -ENODEV;
        }
index 69426e644d17019b1c6e2d672b2e1a04cbffc3fd..3dbb4a21ab44c8a6555f5e2ad2a321a61526e23b 100644 (file)
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
        if (!ci)
                return -ENOMEM;
 
+       spin_lock_init(&ci->lock);
        ci->dev = dev;
        ci->platdata = dev_get_platdata(dev);
        ci->imx28_write_fix = !!(ci->platdata->flags &
index 661f43fe0f9e9e8f9d9be29eaed5bb1307b8df54..c9e80ad48fdcdb0912271a336600167ff91d7276 100644 (file)
@@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci)
        struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
        int retval = 0;
 
-       spin_lock_init(&ci->lock);
-
        ci->gadget.ops          = &usb_gadget_ops;
        ci->gadget.speed        = USB_SPEED_UNKNOWN;
        ci->gadget.max_speed    = USB_SPEED_HIGH;
index e40d47d47d82ad9afd7124ef3ab0151b6e6ecfa7..17989b72cdaec18dbf55d0709897fc75d1842ab7 100644 (file)
@@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f,
 
        switch (creq->bRequestType & USB_RECIP_MASK) {
        case USB_RECIP_INTERFACE:
-               return ffs_func_revmap_intf(func,
-                                           le16_to_cpu(creq->wIndex) >= 0);
+               return (ffs_func_revmap_intf(func,
+                                            le16_to_cpu(creq->wIndex)) >= 0);
        case USB_RECIP_ENDPOINT:
-               return ffs_func_revmap_ep(func,
-                                         le16_to_cpu(creq->wIndex) >= 0);
+               return (ffs_func_revmap_ep(func,
+                                          le16_to_cpu(creq->wIndex)) >= 0);
        default:
                return (bool) (func->ffs->user_flags &
                               FUNCTIONFS_ALL_CTRL_RECIP);
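
The fix above is a misplaced closing parenthesis: the >= 0 comparison was evaluated first and passed in as the second argument, so the revmap helpers always received 0 or 1. A standalone sketch with a hypothetical stand-in for the lookup:

        #include <stdbool.h>

        /* hypothetical: returns a mapped index, or -1 when unknown */
        static int revmap(int func, int idx)
        {
                return (func == 0 && idx == 3) ? 7 : -1;
        }

        static bool match_before(int func, int idx)
        {
                return revmap(func, idx >= 0);  /* bug: compares idx first */
        }

        static bool match_after(int func, int idx)
        {
                return revmap(func, idx) >= 0;  /* fix: compares the result */
        }

match_before(0, 4) still returns true, since revmap(0, 1) yields -1 and any nonzero value converts to true; match_after(0, 4) correctly returns false because the lookup failed.
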
index e01116e4c0671c18f0c3905c9474ac4ca1f7375a..c3e172e15ec3d9d9ec7346b972b2702fed710c65 100644 (file)
@@ -986,7 +986,7 @@ b_host:
        }
 #endif
 
-       schedule_work(&musb->irq_work);
+       schedule_delayed_work(&musb->irq_work, 0);
 
        return handled;
 }
@@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
        case MUSB_QUIRK_B_INVALID_VBUS_91:
-               if (!musb->session && !musb->quirk_invalid_vbus) {
-                       musb->quirk_invalid_vbus = true;
+               if (musb->quirk_retries--) {
                        musb_dbg(musb,
-                                "First invalid vbus, assume no session");
+                                "Poll devctl on invalid vbus, assume no session");
+                       schedule_delayed_work(&musb->irq_work,
+                                             msecs_to_jiffies(1000));
+
                        return;
                }
-               break;
        case MUSB_QUIRK_A_DISCONNECT_19:
+               if (musb->quirk_retries--) {
+                       musb_dbg(musb,
+                                "Poll devctl on possible host mode disconnect");
+                       schedule_delayed_work(&musb->irq_work,
+                                             msecs_to_jiffies(1000));
+
+                       return;
+               }
                if (!musb->session)
                        break;
                musb_dbg(musb, "Allow PM on possible host mode disconnect");
@@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                if (error < 0)
                        dev_err(musb->controller, "Could not enable: %i\n",
                                error);
+               musb->quirk_retries = 3;
        } else {
                musb_dbg(musb, "Allow PM with no session: %02x", devctl);
-               musb->quirk_invalid_vbus = false;
                pm_runtime_mark_last_busy(musb->controller);
                pm_runtime_put_autosuspend(musb->controller);
        }
@@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 /* Only used to provide driver mode change events */
 static void musb_irq_work(struct work_struct *data)
 {
-       struct musb *musb = container_of(data, struct musb, irq_work);
+       struct musb *musb = container_of(data, struct musb, irq_work.work);
 
        musb_pm_runtime_check_session(musb);
 
@@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev,
        INIT_LIST_HEAD(&musb->control);
        INIT_LIST_HEAD(&musb->in_bulk);
        INIT_LIST_HEAD(&musb->out_bulk);
+       INIT_LIST_HEAD(&musb->pending_list);
 
        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
        musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb)
        musb_host_free(musb);
 }
 
+struct musb_pending_work {
+       int (*callback)(struct musb *musb, void *data);
+       void *data;
+       struct list_head node;
+};
+
+/*
+ * Called from musb_runtime_resume(), musb_resume(), and
+ * musb_queue_resume_work(). Callers must take musb->lock.
+ */
+static int musb_run_resume_work(struct musb *musb)
+{
+       struct musb_pending_work *w, *_w;
+       unsigned long flags;
+       int error = 0;
+
+       spin_lock_irqsave(&musb->list_lock, flags);
+       list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
+               if (w->callback) {
+                       error = w->callback(musb, w->data);
+                       if (error < 0) {
+                               dev_err(musb->controller,
+                                       "resume callback %p failed: %i\n",
+                                       w->callback, error);
+                       }
+               }
+               list_del(&w->node);
+               devm_kfree(musb->controller, w);
+       }
+       spin_unlock_irqrestore(&musb->list_lock, flags);
+
+       return error;
+}
+
+/*
+ * Called to run work if device is active or else queue the work to happen
+ * on resume. Caller must take musb->lock and must hold an RPM reference.
+ *
+ * Note that we refuse to queue work once musb PM runtime resume has
+ * already run musb_run_resume_work(), and return -EINPROGRESS instead.
+ */
+int musb_queue_resume_work(struct musb *musb,
+                          int (*callback)(struct musb *musb, void *data),
+                          void *data)
+{
+       struct musb_pending_work *w;
+       unsigned long flags;
+       int error;
+
+       if (WARN_ON(!callback))
+               return -EINVAL;
+
+       if (pm_runtime_active(musb->controller))
+               return callback(musb, data);
+
+       w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+       if (!w)
+               return -ENOMEM;
+
+       w->callback = callback;
+       w->data = data;
+       spin_lock_irqsave(&musb->list_lock, flags);
+       if (musb->is_runtime_suspended) {
+               list_add_tail(&w->node, &musb->pending_list);
+               error = 0;
+       } else {
+               dev_err(musb->controller, "could not add resume work %p\n",
+                       callback);
+               devm_kfree(musb->controller, w);
+               error = -EINPROGRESS;
+       }
+       spin_unlock_irqrestore(&musb->list_lock, flags);
+
+       return error;
+}
+EXPORT_SYMBOL_GPL(musb_queue_resume_work);
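
A caller-side sketch of the new helper, following the contract stated above (musb->lock held, an RPM reference taken); the dsps glue later in this patch queues its devctl poll exactly this way, and the callback name here is illustrative:

        static int my_check(struct musb *musb, void *data)
        {
                /* runs now if the device is active, else on resume */
                return 0;
        }

        unsigned long flags;
        int err;

        spin_lock_irqsave(&musb->lock, flags);
        err = musb_queue_resume_work(musb, my_check, NULL);
        spin_unlock_irqrestore(&musb->lock, flags);
        if (err < 0)
                dev_err(musb->controller, "resume work: %i\n", err);
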
+
 static void musb_deassert_reset(struct work_struct *work)
 {
        struct musb *musb;
@@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        }
 
        spin_lock_init(&musb->lock);
+       spin_lock_init(&musb->list_lock);
        musb->board_set_power = plat->set_power;
        musb->min_power = plat->min_power;
        musb->ops = plat->platform_ops;
@@ -2208,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_generic_disable(musb);
 
        /* Init IRQ workqueue before request_irq */
-       INIT_WORK(&musb->irq_work, musb_irq_work);
+       INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
        INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
        INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
 
@@ -2291,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status)
                goto fail5;
 
+       musb->is_initialized = 1;
        pm_runtime_mark_last_busy(musb->controller);
        pm_runtime_put_autosuspend(musb->controller);
 
@@ -2304,7 +2394,7 @@ fail4:
        musb_host_cleanup(musb);
 
 fail3:
-       cancel_work_sync(&musb->irq_work);
+       cancel_delayed_work_sync(&musb->irq_work);
        cancel_delayed_work_sync(&musb->finish_resume_work);
        cancel_delayed_work_sync(&musb->deassert_reset_work);
        if (musb->dma_controller)
@@ -2371,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev)
         */
        musb_exit_debugfs(musb);
 
-       cancel_work_sync(&musb->irq_work);
+       cancel_delayed_work_sync(&musb->irq_work);
        cancel_delayed_work_sync(&musb->finish_resume_work);
        cancel_delayed_work_sync(&musb->deassert_reset_work);
        pm_runtime_get_sync(musb->controller);
@@ -2557,6 +2647,7 @@ static int musb_suspend(struct device *dev)
 
        musb_platform_disable(musb);
        musb_generic_disable(musb);
+       WARN_ON(!list_empty(&musb->pending_list));
 
        spin_lock_irqsave(&musb->lock, flags);
 
@@ -2578,9 +2669,11 @@ static int musb_suspend(struct device *dev)
 
 static int musb_resume(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-       u8              devctl;
-       u8              mask;
+       struct musb *musb = dev_to_musb(dev);
+       unsigned long flags;
+       int error;
+       u8 devctl;
+       u8 mask;
 
        /*
         * For static cmos like DaVinci, register values were preserved
@@ -2614,6 +2707,13 @@ static int musb_resume(struct device *dev)
 
        musb_start(musb);
 
+       spin_lock_irqsave(&musb->lock, flags);
+       error = musb_run_resume_work(musb);
+       if (error)
+               dev_err(musb->controller, "resume work failed with %i\n",
+                       error);
+       spin_unlock_irqrestore(&musb->lock, flags);
+
        return 0;
 }
 
@@ -2622,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev)
        struct musb     *musb = dev_to_musb(dev);
 
        musb_save_context(musb);
+       musb->is_runtime_suspended = 1;
 
        return 0;
 }
 
 static int musb_runtime_resume(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-       static int      first = 1;
+       struct musb *musb = dev_to_musb(dev);
+       unsigned long flags;
+       int error;
 
        /*
         * When pm_runtime_get_sync called for the first time in driver
@@ -2640,9 +2742,10 @@ static int musb_runtime_resume(struct device *dev)
         * Also context restore without save does not make
         * any sense
         */
-       if (!first)
-               musb_restore_context(musb);
-       first = 0;
+       if (!musb->is_initialized)
+               return 0;
+
+       musb_restore_context(musb);
 
        if (musb->need_finish_resume) {
                musb->need_finish_resume = 0;
@@ -2650,6 +2753,14 @@ static int musb_runtime_resume(struct device *dev)
                                msecs_to_jiffies(USB_RESUME_TIMEOUT));
        }
 
+       spin_lock_irqsave(&musb->lock, flags);
+       error = musb_run_resume_work(musb);
+       if (error)
+               dev_err(musb->controller, "resume work failed with %i\n",
+                       error);
+       musb->is_runtime_suspended = 0;
+       spin_unlock_irqrestore(&musb->lock, flags);
+
        return 0;
 }
 
index 2cb88a498f8a5681265654e079456e0ae944667e..91817d77d59c8ecd226e25fb6ce9e07b5a597d3f 100644 (file)
@@ -303,13 +303,14 @@ struct musb_context_registers {
 struct musb {
        /* device lock */
        spinlock_t              lock;
+       spinlock_t              list_lock;      /* resume work list lock */
 
        struct musb_io          io;
        const struct musb_platform_ops *ops;
        struct musb_context_registers context;
 
        irqreturn_t             (*isr)(int, void *);
-       struct work_struct      irq_work;
+       struct delayed_work     irq_work;
        struct delayed_work     deassert_reset_work;
        struct delayed_work     finish_resume_work;
        struct delayed_work     gadget_work;
@@ -337,6 +338,7 @@ struct musb {
        struct list_head        control;        /* of musb_qh */
        struct list_head        in_bulk;        /* of musb_qh */
        struct list_head        out_bulk;       /* of musb_qh */
+       struct list_head        pending_list;   /* pending work list */
 
        struct timer_list       otg_timer;
        struct notifier_block   nb;
@@ -379,12 +381,15 @@ struct musb {
 
        int                     port_mode;      /* MUSB_PORT_MODE_* */
        bool                    session;
-       bool                    quirk_invalid_vbus;
+       unsigned long           quirk_retries;
        bool                    is_host;
 
        int                     a_wait_bcon;    /* VBUS timeout in msecs */
        unsigned long           idle_timeout;   /* Next timeout in jiffies */
 
+       unsigned                is_initialized:1;
+       unsigned                is_runtime_suspended:1;
+
        /* active means connected and not suspended */
        unsigned                is_active:1;
 
@@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *);
 
 extern void musb_hnp_stop(struct musb *musb);
 
+int musb_queue_resume_work(struct musb *musb,
+                          int (*callback)(struct musb *musb, void *data),
+                          void *data);
+
 static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
 {
        if (musb->ops->set_vbus)
index 0f17d2140db6e5c36eceef43b185d2c7c183e091..feae1561b9abb6924d2fe2fc6f1222dfac9d2be7 100644 (file)
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb)
        musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
        musb_writel(reg_base, wrp->epintr_clear,
                         wrp->txep_bitmap | wrp->rxep_bitmap);
+       del_timer_sync(&glue->timer);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 }
 
-static void otg_timer(unsigned long _musb)
+/* Caller must take musb->lock */
+static int dsps_check_status(struct musb *musb, void *unused)
 {
-       struct musb *musb = (void *)_musb;
        void __iomem *mregs = musb->mregs;
        struct device *dev = musb->controller;
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
        const struct dsps_musb_wrapper *wrp = glue->wrp;
        u8 devctl;
-       unsigned long flags;
        int skip_session = 0;
-       int err;
-
-       err = pm_runtime_get_sync(dev);
-       if (err < 0)
-               dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
 
        /*
         * We poll because DSPS IPs won't expose several OTG-critical
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb)
        dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
                                usb_otg_state_string(musb->xceiv->otg->state));
 
-       spin_lock_irqsave(&musb->lock, flags);
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_A_WAIT_VRISE:
                mod_timer(&glue->timer, jiffies +
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb)
        default:
                break;
        }
-       spin_unlock_irqrestore(&musb->lock, flags);
 
+       return 0;
+}
+
+static void otg_timer(unsigned long _musb)
+{
+       struct musb *musb = (void *)_musb;
+       struct device *dev = musb->controller;
+       unsigned long flags;
+       int err;
+
+       err = pm_runtime_get(dev);
+       if ((err != -EINPROGRESS) && err < 0) {
+               dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
+               pm_runtime_put_noidle(dev);
+
+               return;
+       }
+
+       spin_lock_irqsave(&musb->lock, flags);
+       err = musb_queue_resume_work(musb, dsps_check_status, NULL);
+       if (err < 0)
+               dev_err(dev, "%s resume work: %i\n", __func__, err);
+       spin_unlock_irqrestore(&musb->lock, flags);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 }
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
-
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
-               goto err2;
-       }
-
        ret = dsps_create_musb_pdev(glue, pdev);
        if (ret)
-               goto err3;
-
-       pm_runtime_mark_last_busy(&pdev->dev);
-       pm_runtime_put_autosuspend(&pdev->dev);
+               goto err;
 
        return 0;
 
-err3:
-       pm_runtime_put_sync(&pdev->dev);
-err2:
-       pm_runtime_dont_use_autosuspend(&pdev->dev);
+err:
        pm_runtime_disable(&pdev->dev);
        return ret;
 }
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev)
 
        platform_device_unregister(glue->musb);
 
-       /* disable usbss clocks */
-       pm_runtime_dont_use_autosuspend(&pdev->dev);
-       pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
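
Two PM-runtime details in this file are worth spelling out. pm_runtime_get_sync() may sleep, so otg_timer() — a timer callback running in softirq context — must use the asynchronous pm_runtime_get() instead; and that call reports -EINPROGRESS when a resume is already pending, which is not a failure here. The resulting pattern, condensed into a sketch with hypothetical names:

static void poll_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;	/* hypothetical setup */
	int err;

	/* Async: bumps the usage count and queues a resume request. */
	err = pm_runtime_get(dev);
	if (err != -EINPROGRESS && err < 0) {
		/* Real failure: drop the usage count taken above. */
		pm_runtime_put_noidle(dev);
		return;
	}

	/* ... queue the device-touching work for the resume path ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

With the device-touching work deferred this way, dsps_probe() no longer appears to need the device held resumed across initialization, which is why the autosuspend setup and the get/put pairs could be dropped from the probe and remove paths above.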
index 4042ea017985de56346d0ac1dd070f33db079d77..a55173c9e5645d6f5245c0ff78c4ccf151d75107 100644 (file)
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
                        musb_ep->dma ? "dma, " : "",
                        musb_ep->packet_sz);
 
-       schedule_work(&musb->irq_work);
+       schedule_delayed_work(&musb->irq_work, 0);
 
 fail:
        spin_unlock_irqrestore(&musb->lock, flags);
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
        musb_ep->desc = NULL;
        musb_ep->end_point.desc = NULL;
 
-       schedule_work(&musb->irq_work);
+       schedule_delayed_work(&musb->irq_work, 0);
 
        spin_unlock_irqrestore(&(musb->lock), flags);
 
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
                rxstate(musb, req);
 }
 
+static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+{
+       struct musb_request *req = data;
+
+       musb_ep_restart(musb, req);
+
+       return 0;
+}
+
 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
                        gfp_t gfp_flags)
 {
        struct musb_ep          *musb_ep;
        struct musb_request     *request;
        struct musb             *musb;
-       int                     status = 0;
+       int                     status;
        unsigned long           lockflags;
 
        if (!ep || !req)
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
        if (request->ep != musb_ep)
                return -EINVAL;
 
+       status = pm_runtime_get(musb->controller);
+       if ((status != -EINPROGRESS) && status < 0) {
+               dev_err(musb->controller,
+                       "pm runtime get failed in %s\n",
+                       __func__);
+               pm_runtime_put_noidle(musb->controller);
+
+               return status;
+       }
+       status = 0;
+
        trace_musb_req_enq(request);
 
        /* request is mine now... */
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 
        map_dma_buffer(request, musb, musb_ep);
 
-       pm_runtime_get_sync(musb->controller);
        spin_lock_irqsave(&musb->lock, lockflags);
 
        /* don't queue if the ep is down */
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
        list_add_tail(&request->list, &musb_ep->req_list);
 
        /* if this is the head of the queue, start i/o ... */
-       if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
-               musb_ep_restart(musb, request);
+       if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
+               status = musb_queue_resume_work(musb,
+                                               musb_ep_restart_resume_work,
+                                               request);
+               if (status < 0)
+                       dev_err(musb->controller, "%s resume work: %i\n",
+                               __func__, status);
+       }
 
 unlock:
        spin_unlock_irqrestore(&musb->lock, lockflags);
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
         */
 
        /* Force check of devctl register for PM runtime */
-       schedule_work(&musb->irq_work);
+       schedule_delayed_work(&musb->irq_work, 0);
 
        pm_runtime_mark_last_busy(musb->controller);
        pm_runtime_put_autosuspend(musb->controller);
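
The irq_work conversions in this file (schedule_work() → schedule_delayed_work(..., 0)) keep the old immediate behaviour, since a zero delay queues the work at once, while letting other call sites defer the same work item. A standalone illustration, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void demo_irq_work(struct work_struct *work)
{
	/* A delayed_work embeds a work_struct; recover the container. */
	struct delayed_work *dwork = to_delayed_work(work);

	(void)dwork;	/* ... devctl bookkeeping would go here ... */
}

static DECLARE_DELAYED_WORK(demo_work, demo_irq_work);

static void demo_kick(bool urgent)
{
	if (urgent)
		schedule_delayed_work(&demo_work, 0);	/* like schedule_work() */
	else
		schedule_delayed_work(&demo_work, msecs_to_jiffies(50));
}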
index cc1225485509cbabda9c3241334889776edf8870..e8be8e39ab8fbd2d5b03a6fdc98dc3d78b00512e 100644 (file)
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(glue->dev);
-       pm_runtime_use_autosuspend(glue->dev);
-       pm_runtime_set_autosuspend_delay(glue->dev, 100);
 
        ret = platform_device_add(musb);
        if (ret) {
                dev_err(&pdev->dev, "failed to register musb device\n");
-               goto err2;
+               goto err3;
        }
 
        return 0;
 
+err3:
+       pm_runtime_disable(glue->dev);
+
 err2:
        platform_device_put(musb);
 
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev)
 {
        struct omap2430_glue *glue = platform_get_drvdata(pdev);
 
-       pm_runtime_get_sync(glue->dev);
        platform_device_unregister(glue->musb);
-       pm_runtime_put_sync(glue->dev);
-       pm_runtime_dont_use_autosuspend(glue->dev);
        pm_runtime_disable(glue->dev);
 
        return 0;
index df7c9f46be548f61800b7beaa7f182b5dc447ad5..e85cc8e4e7a9c02e32fdef579d04d8adb22469df 100644 (file)
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
                        dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
                                usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
                        idle_timeout = jiffies + (1 * HZ);
-                       schedule_work(&musb->irq_work);
+                       schedule_delayed_work(&musb->irq_work, 0);
 
                } else /* A-dev state machine */ {
                        dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
                        break;
                }
        }
-       schedule_work(&musb->irq_work);
+       schedule_delayed_work(&musb->irq_work, 0);
 
        return idle_timeout;
 }
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
                musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
                if (reg & ~TUSB_PRCM_WNORCS) {
                        musb->is_active = 1;
-                       schedule_work(&musb->irq_work);
+                       schedule_delayed_work(&musb->irq_work, 0);
                }
                dev_dbg(musb->controller, "wake %sactive %02x\n",
                                musb->is_active ? "" : "in", reg);
index f61477bed3a8d3b16ec444dc090def82bd509798..243ac5ebe46a02560d5cbe3ca8fd97fe4e5eb721 100644 (file)
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
index 0ff7f38d7800502cdf9159299e603d7475ff9beb..6e9fc8bcc285d122c2148cefcb7c8f57ae6c4aa2 100644 (file)
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
        { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+       { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { }                                     /* Terminating entry */
 };
 
index 21011c0a4c6401ffd942e83040f3df53099d9e91..48ee04c94a7541ad6c5f9023be107246207c56fd 100644 (file)
 #define ATMEL_VID              0x03eb /* Vendor ID */
 #define STK541_PID             0x2109 /* Zigbee Controller */
 
+/*
+ * Texas Instruments
+ */
+#define TI_VID                 0x0451
+#define TI_CC3200_LAUNCHPAD_PID        0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
 /*
  * Blackfin gnICE JTAG
  * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
index ffd086733421316bed0d3c0cf56aa20134fb54f5..1a59f335b063e7e79f8ece2173f2f03574ce3a5b 100644 (file)
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
 
        /* COMMAND STAGE */
        /* let's send the command via the control pipe */
+       /*
+        * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the
+        * stack.  The stack may be vmalloc'ed, so no DMA for us.  Make a copy.
+        */
+       memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
        result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
                                      US_CBI_ADSC, 
                                      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 
-                                     us->ifnum, srb->cmnd, srb->cmd_len);
+                                     us->ifnum, us->iobuf, srb->cmd_len);
 
        /* check the return code for the command */
        usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
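
The copy matters because buffers passed to the USB core may be mapped for DMA, and with CONFIG_VMAP_STACK the stack lives in vmalloc space, which is neither physically contiguous nor handled by the streaming DMA API. The general rule — bounce stack data through heap memory before any DMA-capable transfer — in a small sketch (function and parameter names are illustrative):

#include <linux/slab.h>
#include <linux/usb.h>

static int send_cmd_dma_safe(struct usb_device *udev, unsigned int pipe,
			     const void *cmd, int len)
{
	int ret;
	/* kmemdup() gives a kmalloc'ed, DMA-able copy of the stack data. */
	void *buf = kmemdup(cmd, len, GFP_NOIO);

	if (!buf)
		return -ENOMEM;
	ret = usb_bulk_msg(udev, pipe, buf, len, NULL, 5000);
	kfree(buf);
	return ret;
}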
index 532d8e242d4d76c44413d6c814c7548094614ebe..484bebc20bca6a502cc621106d7f34975b48dedc 100644 (file)
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
        }
 
        ret = -EPROTONOSUPPORT;
-       if (minorversion == 0)
+       if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
                ret = nfs4_callback_up_net(serv, net);
        else if (xprt->ops->bc_up)
                ret = xprt->ops->bc_up(serv, net);
index 9b3a82abab079f0a03047260ed2ea4e3ee9154ed..1452177c822dbc4a1be8d7e164de88d636764aed 100644 (file)
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
        return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
 }
 
+static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
+               const nfs4_stateid *stateid)
+{
+       return test_bit(NFS_OPEN_STATE, &state->flags) &&
+               nfs4_stateid_match_other(&state->open_stateid, stateid);
+}
+
 #else
 
 #define nfs4_close_state(a, b) do { } while (0)
index 7897826d7c51bb33f3459939cdb9cac6ab4e9178..241da19b7da4a54a45b1a2bd6cae4cab8cab4dde 100644 (file)
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
-               nfs4_stateid *arg_stateid,
                nfs4_stateid *stateid, fmode_t fmode)
 {
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
        }
        if (stateid == NULL)
                return;
-       /* Handle races with OPEN */
-       if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
-           (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
-           !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+       /* Handle OPEN+OPEN_DOWNGRADE races */
+       if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+           !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
                nfs_resync_open_stateid_locked(state);
                return;
        }
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
        nfs4_stateid *stateid, fmode_t fmode)
 {
        write_seqlock(&state->seqlock);
-       nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+       /* Ignore if the CLOSE argument doesn't match the current stateid */
+       if (nfs4_state_match_open_stateid_other(state, arg_stateid))
+               nfs_clear_open_stateid_locked(state, stateid, fmode);
        write_sequnlock(&state->seqlock);
        if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
                nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 static int nfs41_check_expired_locks(struct nfs4_state *state)
 {
        int status, ret = NFS_OK;
-       struct nfs4_lock_state *lsp;
+       struct nfs4_lock_state *lsp, *prev = NULL;
        struct nfs_server *server = NFS_SERVER(state->inode);
 
        if (!test_bit(LK_STATE_IN_USE, &state->flags))
                goto out;
+
+       spin_lock(&state->state_lock);
        list_for_each_entry(lsp, &state->lock_states, ls_locks) {
                if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
                        struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
 
+                       atomic_inc(&lsp->ls_count);
+                       spin_unlock(&state->state_lock);
+
+                       nfs4_put_lock_state(prev);
+                       prev = lsp;
+
                        status = nfs41_test_and_free_expired_stateid(server,
                                        &lsp->ls_stateid,
                                        cred);
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
                                        set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
                        } else if (status != NFS_OK) {
                                ret = status;
-                               break;
+                               nfs4_put_lock_state(prev);
+                               goto out;
                        }
+                       spin_lock(&state->state_lock);
                }
-       };
+       }
+       spin_unlock(&state->state_lock);
+       nfs4_put_lock_state(prev);
 out:
        return ret;
 }
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        } else if (is_rdwr)
                calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
 
-       if (!nfs4_valid_open_stateid(state))
+       if (!nfs4_valid_open_stateid(state) ||
+           test_bit(NFS_OPEN_STATE, &state->flags) == 0)
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
 
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        switch (task->tk_status) {
        case 0:
                renew_lease(data->res.server, data->timestamp);
+               break;
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_EXPIRED:
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_STALE_STATEID:
                task->tk_status = 0;
-               if (data->roc)
-                       pnfs_roc_set_barrier(data->inode, data->roc_barrier);
                break;
        default:
                if (nfs4_async_handle_error(task, data->res.server,
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
                }
        }
        data->rpc_status = task->tk_status;
+       if (data->roc && data->rpc_status == 0)
+               pnfs_roc_set_barrier(data->inode, data->roc_barrier);
 }
 
 static void nfs4_delegreturn_release(void *calldata)
index 5f4281ec5f72c3556d76a479854d6fb378d03124..0959c96616623f876a5905deb6e03c1a438fe338 100644 (file)
@@ -1547,6 +1547,7 @@ restart:
                                ssleep(1);
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_STALE_STATEID:
+                       case -NFS4ERR_OLD_STATEID:
                        case -NFS4ERR_BAD_STATEID:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
index 7035b997aaa57d7955f4d06501804cd09d8ed675..6aaf425cebc39a9dbb3619d7ad28987ca54a01c7 100644 (file)
@@ -14,7 +14,7 @@
   * are obviously wrong for any sort of memory access.
   */
 #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -1
 
 struct bpf_reg_state {
        enum bpf_reg_type type;
@@ -22,7 +22,8 @@ struct bpf_reg_state {
         * Used to determine if any memory access using this register will
         * result in a bad access.
         */
-       u64 min_value, max_value;
+       s64 min_value;
+       u64 max_value;
        union {
                /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
                s64 imm;
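
Keeping min_value unsigned was the root of the bug: a negative minimum stored in a u64 becomes a huge positive number, so signed checks only work through explicit (s64) casts (the ones being removed in the verifier hunks below), and any call site that forgets the cast silently passes. A short userspace demonstration of the failure mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t min_value = (uint64_t)-1024;	/* "negative" min in a u64 */

	/* Unsigned compare: this is never true, the check silently passes. */
	printf("min_value < 0      : %d\n", min_value < 0);
	/* Only an explicit cast recovers the intended semantics. */
	printf("(s64)min_value < 0 : %d\n", (int64_t)min_value < 0);
	printf("raw u64 value      : %llu\n", (unsigned long long)min_value);
	return 0;
}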
index 348f51b0ec92ed02e72a2060eedb03f37cd0995f..e9c009dc3a4a35a256731f37b3b44d3f05b317e4 100644 (file)
@@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
+extern void sched_autogroup_exit_task(struct task_struct *p);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
 static inline void sched_autogroup_detach(struct task_struct *p) { }
 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit_task(struct task_struct *p) { }
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
index d15214d673b2e8e08fd6437b572278fb1359f10d..2a1abbf8da74368cd01adc40cef6c0644e059ef2 100644 (file)
@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
                struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
                __skb_queue_head_init(&cell->napi_skbs);
+
+               set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
                netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
                napi_enable(&cell->napi);
        }
index b9314b48e39f32ef22366087673bcc961d196b43..f390c3bb05c5d2189d169d31038fd5cb991bebaa 100644 (file)
@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                   struct netlink_callback *cb);
 int fib_table_flush(struct net *net, struct fib_table *table);
 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
index fc4f757107df0b51eae83781324337001ed7fb6d..0940598c002ff7fee832f9cf670b8006d0202ddc 100644 (file)
@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags,
 extern struct list_head net_namespace_list;
 
 struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_fd(int fd);
 
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);
index 8a09b32e07d6c33351993c4c6beb19f5782d6c94..dd4104c9aa12c6a517ff7066d52ab6057d26e586 100644 (file)
@@ -272,7 +272,7 @@ int __init rd_load_image(char *from)
                sys_write(out_fd, buf, BLOCK_SIZE);
 #if !defined(CONFIG_S390)
                if (!(i % 16)) {
-                       printk("%c\b", rotator[rotate & 0x3]);
+                       pr_cont("%c\b", rotator[rotate & 0x3]);
                        rotate++;
                }
 #endif
index 99a7e5b388f236ea62f23afefac5c252416d2b60..6a936159c6e06d629c325978623a32359ec095f4 100644 (file)
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
                                reg->map_ptr->key_size,
                                reg->map_ptr->value_size);
                if (reg->min_value != BPF_REGISTER_MIN_RANGE)
-                       verbose(",min_value=%llu",
-                               (unsigned long long)reg->min_value);
+                       verbose(",min_value=%lld",
+                               (long long)reg->min_value);
                if (reg->max_value != BPF_REGISTER_MAX_RANGE)
                        verbose(",max_value=%llu",
                                (unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                         * index'es we need to make sure that whatever we use
                         * will have a set floor within our range.
                         */
-                       if ((s64)reg->min_value < 0) {
+                       if (reg->min_value < 0) {
                                verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
                                        regno);
                                return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
 {
        if (reg->max_value > BPF_REGISTER_MAX_RANGE)
                reg->max_value = BPF_REGISTER_MAX_RANGE;
-       if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+       if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+           reg->min_value > BPF_REGISTER_MAX_RANGE)
                reg->min_value = BPF_REGISTER_MIN_RANGE;
 }
 
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                                    struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
-       u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+       s64 min_val = BPF_REGISTER_MIN_RANGE;
+       u64 max_val = BPF_REGISTER_MAX_RANGE;
        bool min_set = false, max_set = false;
        u8 opcode = BPF_OP(insn->code);
 
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                return;
        }
 
+       /* If one of our values was at the end of our ranges then we can't just
+        * do our normal operations on the register; we need to set the values
+        * to the min/max since they are undefined.
+        */
+       if (min_val == BPF_REGISTER_MIN_RANGE)
+               dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+       if (max_val == BPF_REGISTER_MAX_RANGE)
+               dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
        switch (opcode) {
        case BPF_ADD:
-               dst_reg->min_value += min_val;
-               dst_reg->max_value += max_val;
+               if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+                       dst_reg->min_value += min_val;
+               if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+                       dst_reg->max_value += max_val;
                break;
        case BPF_SUB:
-               dst_reg->min_value -= min_val;
-               dst_reg->max_value -= max_val;
+               if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+                       dst_reg->min_value -= min_val;
+               if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+                       dst_reg->max_value -= max_val;
                break;
        case BPF_MUL:
-               dst_reg->min_value *= min_val;
-               dst_reg->max_value *= max_val;
+               if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+                       dst_reg->min_value *= min_val;
+               if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+                       dst_reg->max_value *= max_val;
                break;
        case BPF_AND:
-               /* & is special since it could end up with 0 bits set. */
-               dst_reg->min_value &= min_val;
+               /* Disallow AND'ing of negative numbers, ain't nobody got time
+                * for that.  Otherwise the minimum is 0 and the max is the max
+                * value we could AND against.
+                */
+               if (min_val < 0)
+                       dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+               else
+                       dst_reg->min_value = 0;
                dst_reg->max_value = max_val;
                break;
        case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                 */
                if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
                        dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-               else
+               else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
                        dst_reg->min_value <<= min_val;
 
                if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
                        dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
-               else
+               else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
                        dst_reg->max_value <<= max_val;
                break;
        case BPF_RSH:
-               dst_reg->min_value >>= min_val;
-               dst_reg->max_value >>= max_val;
-               break;
-       case BPF_MOD:
-               /* % is special since it is an unsigned modulus, so the floor
-                * will always be 0.
+               /* RSH by a negative number is undefined, and BPF_RSH is an
+                * unsigned shift, so make the appropriate casts.
                 */
-               dst_reg->min_value = 0;
-               dst_reg->max_value = max_val - 1;
+               if (min_val < 0 || dst_reg->min_value < 0)
+                       dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+               else
+                       dst_reg->min_value =
+                               (u64)(dst_reg->min_value) >> min_val;
+               if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+                       dst_reg->max_value >>= max_val;
                break;
        default:
                reset_reg_range_values(regs, insn->dst_reg);
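
All the new != BPF_REGISTER_MIN_RANGE / != BPF_REGISTER_MAX_RANGE guards implement saturating range arithmetic: once an endpoint has been forced to its sentinel it means "unknown", and ordinary +, -, * on it would fabricate a tight-looking but meaningless bound. Factored into helpers it would read roughly like this (the verifier open-codes it; this is only a sketch using the sentinels from this patch):

#include <stdint.h>

#define REG_MIN_RANGE (-1LL)			/* sentinel: min unknown */
#define REG_MAX_RANGE (1024LL * 1024 * 1024)	/* sentinel: max unknown */

/* Saturating add on the minimum bound: unknown in, unknown out. */
static int64_t min_add(int64_t dst_min, int64_t val_min)
{
	if (dst_min == REG_MIN_RANGE || val_min == REG_MIN_RANGE)
		return REG_MIN_RANGE;
	return dst_min + val_min;
}

/* Same rule for the maximum bound, in the unsigned domain. */
static uint64_t max_add(uint64_t dst_max, uint64_t val_max)
{
	if (dst_max == REG_MAX_RANGE || val_max == REG_MAX_RANGE)
		return REG_MAX_RANGE;
	return dst_max + val_max;
}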
index 0e292132efacaaa09fa4960b92315c9c967b0e82..6ee1febdf6ff838b21db38277e0760ae0909e91f 100644 (file)
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
         * this will always be called from the right CPU.
         */
        cpuctx = __get_cpu_context(ctx);
+
+       /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+       if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+               /*
+                * We are removing the last cpu event in this context.
+                * If that event is not active in this cpu, cpuctx->cgrp
+                * should've been cleared by perf_cgroup_switch.
+                */
+               WARN_ON_ONCE(!add && cpuctx->cgrp);
+               return;
+       }
        cpuctx->cgrp = add ? event->cgrp : NULL;
 }
 
@@ -8018,6 +8029,7 @@ restart:
  * if <size> is not specified, the range is treated as a single address.
  */
 enum {
+       IF_ACT_NONE = -1,
        IF_ACT_FILTER,
        IF_ACT_START,
        IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
        { IF_SRC_KERNEL,        "%u/%u" },
        { IF_SRC_FILEADDR,      "%u@%s" },
        { IF_SRC_KERNELADDR,    "%u" },
+       { IF_ACT_NONE,          NULL },
 };
 
 /*
index 9d68c45ebbe30e34290dfda571f7d75e01ce3d97..3076f3089919b60697f296ec01126e0af5947bcc 100644 (file)
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
         */
        perf_event_exit_task(tsk);
 
+       sched_autogroup_exit_task(tsk);
        cgroup_exit(tsk);
 
        /*
index 51c4b24b6328609e57e6e206814a3c3c938d8555..c2b88490d857583026a35090b62f7891446b7ba2 100644 (file)
@@ -45,6 +45,14 @@ enum {
 #define LOCKF_USED_IN_IRQ_READ \
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
+/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit within the required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdep-related structures so that
+ * everything fits within the current size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
@@ -54,18 +62,24 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
+#define MAX_LOCKDEP_ENTRIES    16384UL
+#define MAX_LOCKDEP_CHAINS_BITS        15
+#define MAX_STACK_TRACE_ENTRIES        262144UL
+#else
 #define MAX_LOCKDEP_ENTRIES    32768UL
 
 #define MAX_LOCKDEP_CHAINS_BITS        16
-#define MAX_LOCKDEP_CHAINS     (1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
 #define MAX_STACK_TRACE_ENTRIES        524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS     (1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
index a5d966cb889175d5b196283341b1cead20f20603..f1c8fd5662464cd356d174b80e8cff9e0a6a4b12 100644 (file)
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
        if (tg != &root_task_group)
                return false;
-
        /*
-        * We can only assume the task group can't go away on us if
-        * autogroup_move_group() can see us on ->thread_group list.
+        * If we race with autogroup_move_group() the caller can use the old
+        * value of signal->autogroup, but in this case sched_move_task() will
+        * be called again before autogroup_kref_put().
+        *
+        * However, there is no way sched_autogroup_exit_task() could tell us
+        * to avoid autogroup->tg, so we abuse the PF_EXITING flag for this case.
         */
        if (p->flags & PF_EXITING)
                return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
        return true;
 }
 
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+       /*
+        * We are going to call exit_notify() and autogroup_move_group() can't
+        * see this thread after that: we can no longer use signal->autogroup.
+        * See the PF_EXITING check in task_wants_autogroup().
+        */
+       sched_move_task(p);
+}
+
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
        }
 
        p->signal->autogroup = autogroup_kref_get(ag);
-
-       if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-               goto out;
-
+       /*
+        * We can't avoid sched_move_task() after we changed signal->autogroup:
+        * this process can already run with task_group() == prev->tg, or we can
+        * race with cgroup code which can read autogroup = prev under rq->lock.
+        * In the latter case for_each_thread() cannot miss a migrating thread:
+        * cpu_cgroup_attach() must not be possible after cgroup_exit(), and the
+        * thread can't be removed from the thread list while we hold ->siglock.
+        *
+        * If an exiting thread was already removed from the thread list we rely
+        * on sched_autogroup_exit_task().
+        */
        for_each_thread(p, t)
                sched_move_task(t);
-out:
+
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
 }
index b01e547d4d04475e19590aad8f16c403076f7eb6..a6c8db1d62f65ffb2ec6a53011ce0b675f9fb7d4 100644 (file)
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING
 
         For more details, see Documentation/locking/lockdep-design.txt.
 
+config PROVE_LOCKING_SMALL
+       bool
+
 config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
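
PROVE_LOCKING_SMALL has no prompt, so it can only be enabled by an architecture selecting it; the sparc patches in this same pull do that from arch/sparc/Kconfig, along the lines of:

config SPARC
	bool
	# (existing selects elided)
	select PROVE_LOCKING_SMALL if PROVE_LOCKING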
index e034afbd1bb025ea884ffc623c9467a730c463be..08ce36147c4c3a76bba8d98004617f7a7f03a2e6 100644 (file)
@@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
                        batadv_softif_destroy_sysfs(hard_iface->soft_iface);
        }
 
+       hard_iface->soft_iface = NULL;
        batadv_hardif_put(hard_iface);
 
 out:
index 2333777f919d8ef3e28055733e0d55b64d3ecff3..8af1611b8ab2c21e53b9c46bf74a9fa677d805a4 100644 (file)
@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (unlikely(!primary_if)) {
                err = BATADV_TP_REASON_DST_UNREACHABLE;
+               tp_vars->reason = err;
                goto out;
        }
 
index f61c0e02a4130318060e35180d3cab391daf64af..7001da910c6b26cb2300c38147305a459895cc49 100644 (file)
@@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        bool alloc;
        int id;
 
+       if (atomic_read(&net->count) == 0)
+               return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_irqsave(&net->nsid_lock, flags);
        alloc = atomic_read(&peer->count) == 0 ? false : true;
        id = __peernet2id_alloc(net, peer, &alloc);
index db313ec7af32926c96272435176ab88d75236e81..a99917b5de337b97792635675cbc0cf3a6916442 100644 (file)
@@ -840,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
        if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
            (ext_filter_mask & RTEXT_FILTER_VF)) {
                int num_vfs = dev_num_vf(dev->dev.parent);
-               size_t size = nla_total_size(sizeof(struct nlattr));
-               size += nla_total_size(num_vfs * sizeof(struct nlattr));
+               size_t size = nla_total_size(0);
                size += num_vfs *
-                       (nla_total_size(sizeof(struct ifla_vf_mac)) +
-                        nla_total_size(MAX_VLAN_LIST_LEN *
-                                       sizeof(struct nlattr)) +
+                       (nla_total_size(0) +
+                        nla_total_size(sizeof(struct ifla_vf_mac)) +
+                        nla_total_size(sizeof(struct ifla_vf_vlan)) +
+                        nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
                         nla_total_size(MAX_VLAN_LIST_LEN *
                                        sizeof(struct ifla_vf_vlan_info)) +
                         nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+                        nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
                         nla_total_size(sizeof(struct ifla_vf_rate)) +
                         nla_total_size(sizeof(struct ifla_vf_link_state)) +
                         nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+                        nla_total_size(0) + /* nest IFLA_VF_STATS */
                         /* IFLA_VF_STATS_RX_PACKETS */
                         nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_TX_PACKETS */
@@ -899,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev,
 
 static size_t rtnl_xdp_size(const struct net_device *dev)
 {
-       size_t xdp_size = nla_total_size(1);    /* XDP_ATTACHED */
+       size_t xdp_size = nla_total_size(0) +   /* nest IFLA_XDP */
+                         nla_total_size(1);    /* XDP_ATTACHED */
 
        if (!dev->netdev_ops->ndo_xdp)
                return 0;
@@ -1606,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                head = &net->dev_index_head[h];
                hlist_for_each_entry(dev, head, index_hlist) {
                        if (link_dump_filtered(dev, master_idx, kind_ops))
-                               continue;
+                               goto cont;
                        if (idx < s_idx)
                                goto cont;
                        err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -2849,7 +2852,10 @@ nla_put_failure:
 
 static inline size_t rtnl_fdb_nlmsg_size(void)
 {
-       return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+       return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+              nla_total_size(ETH_ALEN) +       /* NDA_LLADDR */
+              nla_total_size(sizeof(u16)) +    /* NDA_VLAN */
+              0;
 }
 
 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
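
Every one of these sizing fixes follows from one identity: an attribute occupies NLA_ALIGN(NLA_HDRLEN + payload) bytes, and a nest header is just an attribute with zero payload — hence the added nla_total_size(0) terms. Re-deriving rtnl_fdb_nlmsg_size() in userspace (NLA_HDRLEN is 4, alignment is 4):

#include <stdio.h>

#define NLA_HDRLEN		4
#define NLA_ALIGN(len)		(((len) + 3) & ~3)
#define nla_total_size(payload)	NLA_ALIGN(NLA_HDRLEN + (payload))

int main(void)
{
	int ndmsg = 12;			/* NLMSG_ALIGN(sizeof(struct ndmsg)) */
	int size = ndmsg
		 + nla_total_size(6)	/* NDA_LLADDR: ETH_ALEN */
		 + nla_total_size(2);	/* NDA_VLAN: u16 */

	printf("fdb notify size: %d\n", size);	/* 12 + 12 + 8 = 32 */
	return 0;
}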
index c3b80478226ed4aab6ed8ce1e75ea34c9a921385..161fc0f0d752fb559918c22c5c87e7ef7c84dbe6 100644 (file)
@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
 
 int fib_unmerge(struct net *net)
 {
-       struct fib_table *old, *new;
+       struct fib_table *old, *new, *main_table;
 
        /* attempt to fetch local table if it has been allocated */
        old = fib_get_table(net, RT_TABLE_LOCAL);
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net)
        if (!new)
                return -ENOMEM;
 
+       /* table is already unmerged */
+       if (new == old)
+               return 0;
+
        /* replace merged table with clean table */
-       if (new != old) {
-               fib_replace_table(net, old, new);
-               fib_free_table(old);
-       }
+       fib_replace_table(net, old, new);
+       fib_free_table(old);
+
+       /* attempt to fetch main table if it has been allocated */
+       main_table = fib_get_table(net, RT_TABLE_MAIN);
+       if (!main_table)
+               return 0;
+
+       /* flush local entries from main table */
+       fib_table_flush_external(main_table);
 
        return 0;
 }
index 4cff74d4133f124374146a5c997847f27a5f7345..026f309c51e9b6143e74efe2d31c8c5e7a7a3c26 100644 (file)
@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
                                local_l = fib_find_node(lt, &local_tp, l->key);
 
                        if (fib_insert_alias(lt, local_tp, local_l, new_fa,
-                                            NULL, l->key))
+                                            NULL, l->key)) {
+                               kmem_cache_free(fn_alias_kmem, new_fa);
                                goto out;
+                       }
                }
 
                /* stop loop if key wrapped back to 0 */
@@ -1760,6 +1762,71 @@ out:
        return NULL;
 }
 
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
+{
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
+       struct hlist_node *tmp;
+       struct fib_alias *fa;
+
+       /* walk trie in reverse order */
+       for (;;) {
+               unsigned char slen = 0;
+               struct key_vector *n;
+
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
+
+                       /* cannot resize the trie vector */
+                       if (IS_TRIE(pn))
+                               break;
+
+                       /* resize completed node */
+                       pn = resize(t, pn);
+                       cindex = get_index(pkey, pn);
+
+                       continue;
+               }
+
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
+
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
+
+                       continue;
+               }
+
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       /* if alias was cloned to local then we just
+                        * need to remove the local copy from main
+                        */
+                       if (tb->tb_id != fa->tb_id) {
+                               hlist_del_rcu(&fa->fa_list);
+                               alias_free_mem_rcu(fa);
+                               continue;
+                       }
+
+                       /* record local slen */
+                       slen = fa->fa_slen;
+               }
+
+               /* update leaf slen */
+               n->slen = slen;
+
+               if (hlist_empty(&n->leaf)) {
+                       put_child_root(pn, n->key, NULL);
+                       node_free(n);
+               }
+       }
+}
+
 /* Caller must hold RTNL. */
 int fib_table_flush(struct net *net, struct fib_table *tb)
 {
index 606cc3e85d2bc7b1fc02e5c2ea305a5bd65349c5..15db786d50ed28c7d31855a9582ab111752b3641 100644 (file)
@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev)
 }
 
 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
 static void igmpv3_clear_delrec(struct in_device *in_dev);
 static int sf_setstate(struct ip_mc_list *pmc);
 static void sf_markstate(struct ip_mc_list *pmc);
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        spin_unlock_bh(&in_dev->mc_tomb_lock);
 }
 
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
+/*
+ * restore ip_mc_list deleted records
+ */
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 {
        struct ip_mc_list *pmc, *pmc_prev;
-       struct ip_sf_list *psf, *psf_next;
+       struct ip_sf_list *psf;
+       struct net *net = dev_net(in_dev->dev);
+       __be32 multiaddr = im->multiaddr;
 
        spin_lock_bh(&in_dev->mc_tomb_lock);
        pmc_prev = NULL;
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
                        in_dev->mc_tomb = pmc->next;
        }
        spin_unlock_bh(&in_dev->mc_tomb_lock);
+
+       spin_lock_bh(&im->lock);
        if (pmc) {
-               for (psf = pmc->tomb; psf; psf = psf_next) {
-                       psf_next = psf->sf_next;
-                       kfree(psf);
+               im->interface = pmc->interface;
+               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               im->sfmode = pmc->sfmode;
+               if (pmc->sfmode == MCAST_INCLUDE) {
+                       im->tomb = pmc->tomb;
+                       im->sources = pmc->sources;
+                       for (psf = im->sources; psf; psf = psf->sf_next)
+                               psf->sf_crcount = im->crcount;
                }
                in_dev_put(pmc->interface);
-               kfree(pmc);
        }
+       spin_unlock_bh(&im->lock);
 }
 
+/*
+ * flush ip_mc_list deleted records
+ */
 static void igmpv3_clear_delrec(struct in_device *in_dev)
 {
        struct ip_mc_list *pmc, *nextpmc;
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        ip_mc_hash_add(in_dev, im);
 
 #ifdef CONFIG_IP_MULTICAST
-       igmpv3_del_delrec(in_dev, im->multiaddr);
+       igmpv3_del_delrec(in_dev, im);
 #endif
        igmp_group_added(im);
        if (!in_dev->dead)
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev)
 
        ASSERT_RTNL();
 
-       for_each_pmc_rtnl(in_dev, pmc)
+       for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+               igmpv3_del_delrec(in_dev, pmc);
+#endif
                igmp_group_added(pmc);
+       }
 }
 
 /* Device going down */
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev)
        in_dev->mr_gq_running = 0;
        if (del_timer(&in_dev->mr_gq_timer))
                __in_dev_put(in_dev);
-       igmpv3_clear_delrec(in_dev);
 #endif
 
        ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev)
 #endif
        ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-       for_each_pmc_rtnl(in_dev, pmc)
+       for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+               igmpv3_del_delrec(in_dev, pmc);
+#endif
                igmp_group_added(pmc);
+       }
 }
 
 /*
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 
        /* Deactivate timers */
        ip_mc_down(in_dev);
+#ifdef CONFIG_IP_MULTICAST
+       igmpv3_clear_delrec(in_dev);
+#endif
 
        while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
                in_dev->mc_list = i->next_rcu;
                in_dev->mc_count--;
-
-               /* We've dropped the groups in ip_mc_down already */
-               ip_mc_clear_src(i);
                ip_ma_put(i);
        }
 }
index 1294af4e0127b7a9b98d6e9cfa9e3979c7d7086e..f9038d6b109eb41b2d8a3898a2c7ff6519f2c49b 100644 (file)
@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        icsk->icsk_ca_ops = ca;
        icsk->icsk_ca_setsockopt = 1;
 
-       if (sk->sk_state != TCP_CLOSE)
+       if (sk->sk_state != TCP_CLOSE) {
+               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
                tcp_init_congestion_control(sk);
+       }
 }
 
 /* Manage refcounts on socket close. */
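
icsk_ca_priv is a fixed-size scratch area that every congestion-control module overlays with its own state via inet_csk_ca(); without the memset(), a module swapped in on a live socket would init on top of its predecessor's stale bytes. A hypothetical module-side view of why zeroing is assumed:

struct demo_ca {			/* overlaid on icsk_ca_priv */
	u32 cnt;
	u32 last_max;
};

static void demo_ca_init(struct sock *sk)
{
	struct demo_ca *ca = inet_csk_ca(sk);

	/* Real modules often initialize only some fields and rely on the
	 * rest being zero — safe only because the core cleared the area. */
	ca->cnt = 0;
}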
index d123d68f4d1de093a9e9b36f210600fa7406d5e9..0de9d5d2b9ae5cde981924ff0341d20b3e79c3d9 100644 (file)
@@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 
        if (use_hash2) {
                hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
-                           udp_table.mask;
-               hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+                           udptable->mask;
+               hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
-               hslot = &udp_table.hash2[hash2];
+               hslot = &udptable->hash2[hash2];
                offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
        }
 
index 87784560dc46bceaf184f811222f312383f11e04..0a4759b89da24c8bf50830659545b9f7d231a065 100644 (file)
@@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        int mtu;
        unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
        unsigned int max_headroom = psh_hlen;
+       bool use_cache = false;
        u8 hop_limit;
        int err = -1;
 
@@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
                memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
                neigh_release(neigh);
-       } else if (!fl6->flowi6_mark)
+       } else if (!(t->parms.flags &
+                    (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+               /* enable the cache only if the routing decision does
+                * not depend on the current inner header value
+                */
+               use_cache = true;
+       }
+
+       if (use_cache)
                dst = dst_cache_get(&t->dst_cache);
 
        if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1150,7 +1159,7 @@ route_lookup:
                if (t->encap.type != TUNNEL_ENCAP_NONE)
                        goto tx_err_dst_release;
        } else {
-               if (!fl6->flowi6_mark && ndst)
+               if (use_cache && ndst)
                        dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
        }
        skb_dst_set(skb, dst);
index b2ef061e6836af29048d1d86ed7b29587f3dd410..e5056d4873d1b62ca33f006f71ed65d06461c48f 100644 (file)
@@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 
        if (use_hash2) {
                hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
-                           udp_table.mask;
-               hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+                           udptable->mask;
+               hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
-               hslot = &udp_table.hash2[hash2];
+               hslot = &udptable->hash2[hash2];
                offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
        }
 
index 965f7e344cef8961a85c4e2e6d36f7d1b043f3d7..3dc97b4f982b57ed4375b5cf971155fdbd2c6eb7 100644 (file)
@@ -97,7 +97,7 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int len = skb->len;
        int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
 
-       if (likely(ret == NET_XMIT_SUCCESS)) {
+       if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                atomic_long_add(len, &priv->tx_bytes);
                atomic_long_inc(&priv->tx_packets);
        } else {
index fce25afb652ad62d22ad2d8b26118245fee6ba14..982f6c44ea01f053a51afcbb4b271a2e77df2178 100644 (file)
@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        int ret;
        int chk_addr_ret;
 
-       if (!sock_flag(sk, SOCK_ZAPPED))
-               return -EINVAL;
        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;
        if (addr->l2tp_family != AF_INET)
@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        read_unlock_bh(&l2tp_ip_lock);
 
        lock_sock(sk);
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               goto out;
+
        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
                goto out;
 
index ad3468c32b53a33b32da3e23c0aa109d13d01dce..9978d01ba0bae4eaf34810665d581ddd06579e96 100644 (file)
@@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        int addr_type;
        int err;
 
-       if (!sock_flag(sk, SOCK_ZAPPED))
-               return -EINVAL;
        if (addr->l2tp_family != AF_INET6)
                return -EINVAL;
        if (addr_len < sizeof(*addr))
@@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        lock_sock(sk);
 
        err = -EINVAL;
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               goto out_unlock;
+
        if (sk->sk_state != TCP_CLOSE)
                goto out_unlock;
 
index 78e9ecbc96e616d0f90228abb5bcda59147ea73f..8e05032689f08677d37942c1549812bfee43cf05 100644 (file)
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
        }
 
        /* No need to do anything if the driver does all */
-       if (!local->ops->set_tim)
+       if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
                return;
 
        if (sta->dead)
index 1c56abc496272bb58d71639975e4706c266f2066..bd5f4be89435eb6ef20b7103c7627f7bc10e9cce 100644 (file)
@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
                                struct sta_info *sta,
                                struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct fq *fq = &local->fq;
        struct ieee80211_vif *vif;
        struct txq_info *txqi;
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
        if (!txqi)
                return false;
 
-       info->control.vif = vif;
-
        spin_lock_bh(&fq->lock);
        ieee80211_txq_enqueue(local, txqi, skb);
        spin_unlock_bh(&fq->lock);
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
 
        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-               *ieee80211_get_qos_ctl(hdr) = tid;
                hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
        } else {
                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
                      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
        info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
 
+       if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+               tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+               *ieee80211_get_qos_ctl(hdr) = tid;
+       }
+
        __skb_queue_head_init(&tx.skbs);
 
        tx.flags = IEEE80211_TX_UNICAST;
@@ -3426,6 +3427,11 @@ begin:
                goto begin;
        }
 
+       if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+               info->flags |= IEEE80211_TX_CTL_AMPDU;
+       else
+               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
        if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
                struct sta_info *sta = container_of(txq->sta, struct sta_info,
                                                    sta);
index ee715764a828954e4c8d90e3bfbcbc697c100ec0..6832bf6ab69fe012ea4eeb3c02b79523083cdc58 100644 (file)
@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
                vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
        }
 
+       /*
+        * This is a workaround for VHT-enabled STAs which break the spec
+        * and have the VHT-MCS Rx map filled in with value 3 for all eight
+        * spatial streams; one example is the AR9462.
+        *
+        * Per the spec (section 22.1.1, Introduction to the VHT PHY), a
+        * VHT STA shall support at least single spatial stream VHT-MCSs
+        * 0 to 7 (transmit and receive) in all supported channel widths.
+        */
+       if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
+               vht_cap->vht_supported = false;
+               sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
+                          sta->addr);
+               return;
+       }
+
        /* finally set up the bandwidth */
        switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
        case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
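The added check rejects an Rx MCS map of 0xFFFF: the map packs eight 2-bit subfields, one per spatial stream, and the subfield value 3 means "not supported", so an all-ones map advertises no usable stream at all, which the spec forbids. A standalone sketch of the same decoding, with hypothetical helper names:

#include <stdbool.h>
#include <stdint.h>

#define VHT_MCS_SUBFIELD_NOT_SUPPORTED 3u

/* 0xFFFF == "not supported" in all eight 2-bit subfields */
static bool vht_rx_mcs_map_is_valid(uint16_t rx_mcs_map)
{
        return rx_mcs_map != 0xFFFF;
}

/* the 2-bit MCS subfield for one stream; nss counted from 0 */
static unsigned int vht_mcs_for_stream(uint16_t map, unsigned int nss)
{
        return (map >> (2u * nss)) & 0x3u;
}
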
index 2b2a7974e4bba24a75b60a17b0014376e2e887c9..8e93d4afe5ead0b0a983a1cb29627b517c2c8421 100644 (file)
@@ -112,7 +112,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
 
        for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
             it_chain = &tp->next)
-               tfilter_notify(net, oskb, n, tp, 0, event, false);
+               tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false);
 }
 
 /* Select new prio value from the range, managed by kernel. */
@@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
        if (!skb)
                return -ENOBUFS;
 
-       if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
+       if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
+                         n->nlmsg_flags, event) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
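Both cls_api hunks replace a hardcoded 0 with the originating request's nlmsg_flags, so filter event notifications are built with the same flags the triggering request carried. A hedged sketch of that passthrough over the uapi netlink header; the helper name is invented:

#include <linux/netlink.h>      /* uapi struct nlmsghdr */

/* mirror the request's flags and sequence into the notification */
static void fill_notify_hdr(struct nlmsghdr *reply,
                            const struct nlmsghdr *request)
{
        reply->nlmsg_flags = request->nlmsg_flags;  /* was hardcoded 0 */
        reply->nlmsg_seq = request->nlmsg_seq;
}
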
index f9f5f3c3dab530c0b798d314873800500ccc30b5..db32777ab59117b321c1bdef713a1667748bc10a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
 static const struct proto_ops msg_ops;
 static struct proto tipc_proto;
-
 static const struct rhashtable_params tsk_rht_params;
 
-/*
- * Revised TIPC socket locking policy:
- *
- * Most socket operations take the standard socket lock when they start
- * and hold it until they finish (or until they need to sleep).  Acquiring
- * this lock grants the owner exclusive access to the fields of the socket
- * data structures, with the exception of the backlog queue.  A few socket
- * operations can be done without taking the socket lock because they only
- * read socket information that never changes during the life of the socket.
- *
- * Socket operations may acquire the lock for the associated TIPC port if they
- * need to perform an operation on the port.  If any routine needs to acquire
- * both the socket lock and the port lock it must take the socket lock first
- * to avoid the risk of deadlock.
- *
- * The dispatcher handling incoming messages cannot grab the socket lock in
- * the standard fashion, since invoked it runs at the BH level and cannot block.
- * Instead, it checks to see if the socket lock is currently owned by someone,
- * and either handles the message itself or adds it to the socket's backlog
- * queue; in the latter case the queued message is processed once the process
- * owning the socket lock releases it.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping overcomes
- * the problem of a blocked socket operation preventing any other operations
- * from occurring.  However, applications must be careful if they have
- * multiple threads trying to send (or receive) on the same socket, as these
- * operations might interfere with each other.  For example, doing a connect
- * and a receive at the same time might allow the receive to consume the
- * ACK message meant for the connect.  While additional work could be done
- * to try and overcome this, it doesn't seem to be worthwhile at the present.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping also ensures
- * that another operation that must be performed in a non-blocking manner is
- * not delayed for very long because the lock has already been taken.
- *
- * NOTE: This code assumes that certain fields of a port/socket pair are
- * constant over its lifetime; such fields can be examined without taking
- * the socket lock and/or port lock, and do not need to be re-read even
- * after resuming processing after waiting.  These fields include:
- *   - socket type
- *   - pointer to socket sk structure (aka tipc_sock structure)
- *   - pointer to port structure
- *   - port reference
- */
-
 static u32 tsk_own_node(struct tipc_sock *tsk)
 {
        return msg_prevnode(&tsk->phdr);
index 5d1c14a2f26823f403d2677e27a34b9a2e5a3d6d..2358f2690ec58c20aeb77fb60ac45b3db27ecac6 100644 (file)
@@ -2199,7 +2199,8 @@ out:
  *     Sleep until more data has arrived. But check for races..
  */
 static long unix_stream_data_wait(struct sock *sk, long timeo,
-                                 struct sk_buff *last, unsigned int last_len)
+                                 struct sk_buff *last, unsigned int last_len,
+                                 bool freezable)
 {
        struct sk_buff *tail;
        DEFINE_WAIT(wait);
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                unix_state_unlock(sk);
-               timeo = freezable_schedule_timeout(timeo);
+               if (freezable)
+                       timeo = freezable_schedule_timeout(timeo);
+               else
+                       timeo = schedule_timeout(timeo);
                unix_state_lock(sk);
 
                if (sock_flag(sk, SOCK_DEAD))
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
        unsigned int splice_flags;
 };
 
-static int unix_stream_read_generic(struct unix_stream_read_state *state)
+static int unix_stream_read_generic(struct unix_stream_read_state *state,
+                                   bool freezable)
 {
        struct scm_cookie scm;
        struct socket *sock = state->socket;
@@ -2330,7 +2335,7 @@ again:
                        mutex_unlock(&u->iolock);
 
                        timeo = unix_stream_data_wait(sk, timeo, last,
-                                                     last_len);
+                                                     last_len, freezable);
 
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                .flags = flags
        };
 
-       return unix_stream_read_generic(&state);
+       return unix_stream_read_generic(&state, true);
 }
 
 static int unix_stream_splice_actor(struct sk_buff *skb,
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
            flags & SPLICE_F_NONBLOCK)
                state.flags = MSG_DONTWAIT;
 
-       return unix_stream_read_generic(&state);
+       return unix_stream_read_generic(&state, false);
 }
 
 static int unix_shutdown(struct socket *sock, int mode)
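The recvmsg path keeps the freezer-friendly sleep, while the splice path opts out through the new freezable parameter; splice can reach this wait while holding the pipe lock, where a freezable sleep is not safe. A shape-only sketch of the plumbing, with invented names and the kernel calls reduced to comments:

#include <stdbool.h>

/* one generic worker; the caller decides how it may sleep */
static int stream_read_generic(bool freezable)
{
        /* ... when no data is ready: */
        if (freezable) {
                /* freezable_schedule_timeout(): freezer may act here */
        } else {
                /* schedule_timeout(): safe while other locks are held */
        }
        return 0;
}

static int stream_recvmsg(void)
{
        return stream_read_generic(true);   /* no extra locks held */
}

static int stream_splice_read(void)
{
        return stream_read_generic(false);  /* pipe lock may be held */
}
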
index 08d2e948c9ad306a0ed40531efc4103877b9f4ae..f0c0c8a48c920887cadbc714f3e55015e9b9761d 100644 (file)
@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
        struct list_head bss_list;
        struct rb_root bss_tree;
        u32 bss_generation;
+       u32 bss_entries;
        struct cfg80211_scan_request *scan_req; /* protected by RTNL */
        struct sk_buff *scan_msg;
        struct cfg80211_sched_scan_request __rcu *sched_scan_req;
index b5bd58d0f73129104e32a07155b03f915c343d54..35ad69fd08383a2a8392170d2a580fc4549276d7 100644 (file)
  * also linked into the probe response struct.
  */
 
+/*
+ * Limit the number of BSS entries stored in cfg80211. Each one is
+ * a bit over 4k at most, so this limits the cache to roughly 4-5 MB
+ * of memory. If somebody really wanted to attack this, they'd likely
+ * use small beacons and only one type of frame, keeping each entry
+ * much smaller (so as to generate more entries in total for the
+ * same memory overhead).
+ */
+static int bss_entries_limit = 1000;
+module_param(bss_entries_limit, int, 0644);
+MODULE_PARM_DESC(bss_entries_limit,
+                 "limit to number of scan BSS entries (per wiphy, default 1000)");
+
 #define IEEE80211_SCAN_RESULT_EXPIRE   (30 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
 
        list_del_init(&bss->list);
        rb_erase(&bss->rbn, &rdev->bss_tree);
+       rdev->bss_entries--;
+       WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
+                 "rdev bss entries[%d]/list[empty:%d] corruption\n",
+                 rdev->bss_entries, list_empty(&rdev->bss_list));
        bss_ref_put(rdev, bss);
        return true;
 }
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
                rdev->bss_generation++;
 }
 
+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_internal_bss *bss, *oldest = NULL;
+       bool ret;
+
+       lockdep_assert_held(&rdev->bss_lock);
+
+       list_for_each_entry(bss, &rdev->bss_list, list) {
+               if (atomic_read(&bss->hold))
+                       continue;
+
+               if (!list_empty(&bss->hidden_list) &&
+                   !bss->pub.hidden_beacon_bss)
+                       continue;
+
+               if (oldest && time_before(oldest->ts, bss->ts))
+                       continue;
+               oldest = bss;
+       }
+
+       if (WARN_ON(!oldest))
+               return false;
+
+       /*
+        * The callers make sure to bump rdev->bss_generation whenever
+        * anything is removed (and a new entry is added), so there is
+        * no need to do it here as well.
+        */
+
+       ret = __cfg80211_unlink_bss(rdev, oldest);
+       WARN_ON(!ret);
+       return ret;
+}
+
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
                           bool send_message)
 {
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
        const u8 *ie;
        int i, ssidlen;
        u8 fold = 0;
+       u32 n_entries = 0;
 
        ies = rcu_access_pointer(new->pub.beacon_ies);
        if (WARN_ON(!ies))
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
        /* This is the bad part ... */
 
        list_for_each_entry(bss, &rdev->bss_list, list) {
+               /*
+                * we're iterating over all the entries anyway, so take the
+                * opportunity to validate the list length accounting
+                */
+               n_entries++;
+
                if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
                        continue;
                if (bss->pub.channel != new->pub.channel)
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
                                   new->pub.beacon_ies);
        }
 
+       WARN_ONCE(n_entries != rdev->bss_entries,
+                 "rdev bss entries[%d]/list[len:%d] corruption\n",
+                 rdev->bss_entries, n_entries);
+
        return true;
 }
 
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                        }
                }
 
+               if (rdev->bss_entries >= bss_entries_limit &&
+                   !cfg80211_bss_expire_oldest(rdev)) {
+                       kfree(new);
+                       goto drop;
+               }
+
                list_add_tail(&new->list, &rdev->bss_list);
+               rdev->bss_entries++;
                rb_insert_bss(rdev, new);
                found = new;
        }
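Taken together, the scan.c changes add a per-wiphy counter (rdev->bss_entries), cross-check it against the list in two WARN_ONCEs, and, once bss_entries_limit is reached, evict the oldest entry that is neither held nor still referenced by hidden entries before inserting a new one, dropping the new entry if nothing is evictable. A standalone sketch of the eviction policy over a plain singly linked list; names are invented, and the kernel version runs under rdev->bss_lock with a list/rbtree pair:

#include <stdbool.h>
#include <stddef.h>

struct entry {
        struct entry *next;
        unsigned long ts;       /* lower timestamp = older entry */
        bool held;              /* pinned entries are never evicted */
};

/* unlink and return the oldest unpinned entry, or NULL if none */
static struct entry *evict_oldest(struct entry **head)
{
        struct entry **pp, **oldest = NULL;
        struct entry *victim;

        for (pp = head; *pp; pp = &(*pp)->next) {
                if ((*pp)->held)
                        continue;
                if (!oldest || (*pp)->ts < (*oldest)->ts)
                        oldest = pp;
        }
        if (!oldest)
                return NULL;    /* all pinned: caller drops the new entry */

        victim = *oldest;
        *oldest = victim->next; /* unlink; the caller frees it */
        return victim;
}
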
index 5ea12afc770610e88a2b8ad870d08676fa2a71c9..659b507b347d6a5d183baab16665ace0a03ae672 100644 (file)
@@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
                   58500000,
                   65000000,
                   78000000,
-                  0,
+               /* not in the spec, but some devices use this: */
+                  86500000,
                },
                {  13500000,
                   27000000,
index fc3036b34e5128cc73910eb6e3b2bbd2d94d9e50..a4d90aa1045afc46499e00da9baf9bbcea34752a 100644 (file)
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
        /* released below */
        cred = get_current_cred();
        cxt = cred_cxt(cred);
-       profile = aa_cred_profile(cred);
-       previous_profile = cxt->previous;
+       profile = aa_get_newest_profile(aa_cred_profile(cred));
+       previous_profile = aa_get_newest_profile(cxt->previous);
 
        if (unconfined(profile)) {
                info = "unconfined";
@@ -718,6 +718,8 @@ audit:
 out:
        aa_put_profile(hat);
        kfree(name);
+       aa_put_profile(profile);
+       aa_put_profile(previous_profile);
        put_cred(cred);
 
        return error;
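
The apparmor fix takes real references on both the current and previous profiles via aa_get_newest_profile() and releases them on the way out, so neither can be freed by a concurrent profile replacement while aa_change_hat() still dereferences them. A minimal get/put refcount sketch of the pattern, with invented names and C11 atomics rather than the kernel's kref:

#include <stdatomic.h>
#include <stdlib.h>

struct profile {
        atomic_int refcnt;      /* object is freed when this hits zero */
};

/* take a reference before using the object outside its lock */
static struct profile *profile_get(struct profile *p)
{
        if (p)
                atomic_fetch_add(&p->refcnt, 1);
        return p;
}

/* drop the reference on every exit path, including errors */
static void profile_put(struct profile *p)
{
        if (p && atomic_fetch_sub(&p->refcnt, 1) == 1)
                free(p);
}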