Merge tag 'trace-v4.2-rc1-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 15 Jul 2015 18:14:10 +0000 (11:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 15 Jul 2015 18:14:10 +0000 (11:14 -0700)
Pull tracing fix from Steven Rostedt:
 "Fengguang Wu discovered a crash that happened to be because of the
  branch tracer (traces unlikely and likely branches) when enabled with
  certain debug options.

  What happened was that various debug options like lockdep and
  DEBUG_PREEMPT can cause parts of the branch tracer to recurse outside
  its recursion protection.  In fact, part of its recursion protection
  used these features that caused the lockup.  This cleans up the code a
  little and makes the recursion protection a bit more robust"

* tag 'trace-v4.2-rc1-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Have branch tracer use recursive field of task struct
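
For context, the fix has the branch tracer reuse the per-task recursion field instead of helpers that themselves exercise lockdep and DEBUG_PREEMPT. Below is a minimal userspace sketch of that per-task recursion-guard pattern, with hypothetical names (the kernel's actual field is task_struct->trace_recursion); it illustrates the idea, not the kernel code itself:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for task_struct; the kernel keeps a
	 * trace_recursion bitmask directly on the task. */
	struct task {
		unsigned long trace_recursion;
	};

	#define BRANCH_BIT	0

	/* Claim this task's branch-tracer bit.  Returns false if the bit
	 * is already set, i.e. the tracer is re-entering itself. */
	static bool branch_recursion_enter(struct task *t)
	{
		if (t->trace_recursion & (1UL << BRANCH_BIT))
			return false;
		t->trace_recursion |= 1UL << BRANCH_BIT;
		return true;
	}

	static void branch_recursion_exit(struct task *t)
	{
		t->trace_recursion &= ~(1UL << BRANCH_BIT);
	}

	int main(void)
	{
		struct task cur = { 0 };

		if (branch_recursion_enter(&cur)) {
			/* trace the branch; a nested enter bails out: */
			printf("nested enter allowed: %d\n",
			       branch_recursion_enter(&cur)); /* 0 */
			branch_recursion_exit(&cur);
		}
		return 0;
	}

Because the guard is a plain bit on the task itself, testing and setting it cannot recurse into the tracer the way lockdep- or preempt-instrumented helpers could.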

319 files changed:
Documentation/arm/sunxi/README
Documentation/devicetree/bindings/arm/sunxi.txt
Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
Documentation/power/swsusp.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/am335x-boneblack.dts
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/atlas7.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra72-evm.dts
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/sunxi_defconfig
arch/arm/include/asm/io.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/smp.c
arch/arm/lib/memcpy.S
arch/arm/lib/memset.S
arch/arm/mach-omap2/dma.c
arch/arm/mach-prima2/Kconfig
arch/arm/mach-prima2/rtciobrg.c
arch/arm/mach-sunxi/Kconfig
arch/arm/mach-sunxi/sunxi.c
arch/arm/mm/ioremap.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/vdso/vdsomunge.c
arch/arm64/Kconfig
arch/arm64/boot/dts/apm/apm-mustang.dts
arch/arm64/boot/dts/arm/Makefile
arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts [new file with mode: 0644]
arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/entry32.S
arch/arm64/kernel/smp.c
arch/arm64/mm/Makefile
arch/cris/arch-v32/drivers/sync_serial.c
arch/mips/Kconfig
arch/mips/include/asm/mach-loongson64/mmzone.h
arch/mips/include/asm/smp.h
arch/mips/kernel/branch.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/loongson64/common/bonito-irq.c
arch/mips/loongson64/common/cmdline.c
arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
arch/mips/loongson64/common/env.c
arch/mips/loongson64/common/irq.c
arch/mips/loongson64/common/setup.c
arch/mips/loongson64/fuloong-2e/irq.c
arch/mips/loongson64/lemote-2f/clock.c
arch/mips/loongson64/loongson-3/numa.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mti-malta/malta-time.c
arch/mips/pistachio/init.c
arch/mips/pistachio/time.c
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/tlbflush.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/traps.c
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/traps.c
arch/powerpc/mm/fault.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-prd.c
arch/powerpc/sysdev/ppc4xx_hsta_msi.c
arch/tile/lib/memcpy_user_64.c
arch/x86/Kconfig
arch/x86/include/asm/espfix.h
arch/x86/include/asm/kasan.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/espfix_64.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/irq.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/lib/usercopy.c
arch/x86/mm/kasan_init_64.c
drivers/acpi/acpi_lpss.c
drivers/acpi/nfit.c
drivers/acpi/nfit.h
drivers/acpi/osl.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/ata/Kconfig
drivers/ata/ahci_platform.c
drivers/base/firmware_class.c
drivers/base/power/domain.c
drivers/base/power/wakeirq.c
drivers/base/power/wakeup.c
drivers/clk/at91/clk-h32mx.c
drivers/clk/at91/clk-main.c
drivers/clk/at91/clk-master.c
drivers/clk/at91/clk-pll.c
drivers/clk/at91/clk-system.c
drivers/clk/at91/clk-utmi.c
drivers/clk/bcm/clk-iproc-asiu.c
drivers/clk/bcm/clk-iproc-pll.c
drivers/clk/clk-stm32f4.c
drivers/clk/mediatek/clk-mt8173.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/st/clk-flexgen.c
drivers/clk/st/clkgen-fsyn.c
drivers/clk/st/clkgen-mux.c
drivers/clk/st/clkgen-pll.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clocksource/timer-imx-gpt.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/nx/nx-aes-ccm.c
drivers/crypto/nx/nx-aes-ctr.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-aes-xcbc.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c
drivers/crypto/nx/nx.h
drivers/crypto/omap-des.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-xgene-slimpro.c
drivers/i2c/i2c-core.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/memory/omap-gpmc.c
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/main.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/misc/mei/bus.c
drivers/misc/mei/init.c
drivers/misc/mei/nfc.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/dev.c
drivers/net/can/rcar_can.c
drivers/net/can/slcan.c
drivers/net/can/vcan.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/sfc/ef10_sriov.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/hamradio/bpqether.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/r8152.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wan/z85230.c
drivers/nvdimm/bus.c
drivers/pnp/system.c
drivers/video/fbdev/stifb.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/compat_ioctl.c
fs/dcache.c
fs/ecryptfs/file.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/hpfs/alloc.c
fs/hpfs/dir.c
fs/hpfs/file.c
fs/hpfs/hpfs_fn.h
fs/hpfs/super.c
fs/jfs/ioctl.c
fs/nilfs2/ioctl.c
fs/ocfs2/ioctl.c
fs/overlayfs/inode.c
include/linux/acpi.h
include/linux/buffer_head.h
include/linux/can/skb.h
include/linux/ceph/messenger.h
include/linux/compiler.h
include/linux/irqdesc.h
include/linux/mod_devicetable.h
include/linux/rtc/sirfsoc_rtciobrg.h
include/linux/tick.h
include/linux/timekeeping.h
include/linux/usb/cdc_ncm.h
include/uapi/linux/netconf.h
kernel/auditsc.c
kernel/cpu.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/irq/internals.h
kernel/module.c
kernel/time/clockevents.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-sched.h
lib/Kconfig.kasan
lib/rhashtable.c
mm/memory.c
net/bridge/br_forward.c
net/bridge/br_mdb.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_netlink.c
net/can/af_can.c
net/can/bcm.c
net/can/raw.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/core/dev.c
net/core/gen_estimator.c
net/core/pktgen.c
net/core/rtnetlink.c
net/dsa/dsa.c
net/ipv4/devinet.c
net/ipv4/inet_diag.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/arp_tables.c
net/ipv6/ip6_input.c
net/ipv6/route.c
net/netfilter/nf_queue.c
net/netfilter/nfnetlink.c
net/netlink/af_netlink.c
net/rds/transport.c
net/switchdev/switchdev.c
net/tipc/socket.c
scripts/mod/devicetable-offsets.c
scripts/mod/file2alias.c
scripts/mod/modpost.c
security/selinux/hooks.c
security/selinux/ss/ebitmap.c
tools/include/linux/compiler.h
tools/include/linux/export.h [deleted file]
tools/include/linux/rbtree.h [new file with mode: 0644]
tools/include/linux/rbtree_augmented.h [new file with mode: 0644]
tools/lib/rbtree.c [new file with mode: 0644]
tools/perf/MANIFEST
tools/perf/util/Build
tools/perf/util/include/linux/rbtree.h [deleted file]
tools/perf/util/include/linux/rbtree_augmented.h [deleted file]
tools/testing/nvdimm/Kbuild
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit.c

index 1fe2d7fd4108ffd98f0585b23e04f36e40c1cd57..5e38e1582f9545a6c82d0252baa4841e91a577d3 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -36,7 +36,7 @@ SunXi family
         + User Manual
           http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
 
-      - Allwinner A23
+      - Allwinner A23 (sun8i)
         + Datasheet
           http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
         + User Manual
@@ -55,7 +55,23 @@ SunXi family
         + User Manual
           http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf
 
+      - Allwinner A33 (sun8i)
+        + Datasheet
+          http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
+        + User Manual
+          http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf
+
+      - Allwinner H3 (sun8i)
+        + Datasheet
+          http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf
+
     * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
       - Allwinner A80
         + Datasheet
          http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf
+
+    * Octa ARM Cortex-A7 based SoCs
+      - Allwinner A83T
+        + Not Supported
+        + Datasheet
+          http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
index 42941fdefb11f163dcc18e7849297f099b76760e..67da20539540cc4e7725deac8748e70fcce357a5 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -9,4 +9,6 @@ using one of the following compatible strings:
   allwinner,sun6i-a31
   allwinner,sun7i-a20
   allwinner,sun8i-a23
+  allwinner,sun8i-a33
+  allwinner,sun8i-h3
   allwinner,sun9i-a80
index 938f8e1ba2051c98302705df219da917b7e5952f..0db60470ebb6aa7f8d236afd4599166dafb0c11b 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it.
 Required properties:
 - compatible   : Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
   is the IP revision of the specific EMIF instance.
+                 For am437x should be ti,emif-am4372.
 
 - phy-type     : <u32> indicating the DDR phy type. Following are the
   allowed values
index f732a8321e8a75274a308bf6a6220a588cf00780..8cc17ca718131656aac98d0946ffa2ecad5a1dc8 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -410,8 +410,17 @@ Documentation/usb/persist.txt.
 
 Q: Can I suspend-to-disk using a swap partition under LVM?
 
-A: No. You can suspend successfully, but you'll not be able to
-resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+A: Yes and No.  You can suspend successfully, but the kernel will not be able
+to resume on its own.  You need an initramfs that can recognize the resume
+situation, activate the logical volume containing the swap volume (but not
+touch any filesystems!), and eventually call
+
+echo -n "$major:$minor" > /sys/power/resume
+
+where $major and $minor are the respective major and minor device numbers of
+the swap volume.
+
+uswsusp works with LVM, too.  See http://suspend.sourceforge.net/
 
 Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
 compiled with the similar configuration files. Anyway I found that
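
For illustration only: the same resume trigger as the shell one-liner the patch adds above, done from C with the device numbers resolved via stat() instead of hard-coded $major:$minor; /dev/vg0/swap is an assumed example path, not something the patch names:

	#include <stdio.h>
	#include <sys/stat.h>
	#include <sys/sysmacros.h>

	int main(void)
	{
		struct stat st;
		FILE *f;

		/* The logical volume holding swap; adjust for the
		 * local setup. */
		if (stat("/dev/vg0/swap", &st) != 0) {
			perror("stat");
			return 1;
		}

		f = fopen("/sys/power/resume", "w");
		if (!f) {
			perror("/sys/power/resume");
			return 1;
		}

		/* Equivalent of:
		 * echo -n "$major:$minor" > /sys/power/resume */
		fprintf(f, "%u:%u", major(st.st_rdev), minor(st.st_rdev));
		fclose(f);
		return 0;
	}
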
index 8133cefb6b6e28715197a86aad555c29edbb7aa1..2d3d55c8f5bea3180f4304bddd7b5c59106e79db 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1614,6 +1614,7 @@ M:        Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/vexpress*
+F:     arch/arm64/boot/dts/arm/vexpress*
 F:     arch/arm/mach-vexpress/
 F:     */*/vexpress*
 F:     */*/*/vexpress*
@@ -2562,19 +2563,31 @@ F:      arch/powerpc/include/uapi/asm/spu*.h
 F:     arch/powerpc/oprofile/*cell*
 F:     arch/powerpc/platforms/cell/
 
-CEPH DISTRIBUTED FILE SYSTEM CLIENT
+CEPH COMMON CODE (LIBCEPH)
+M:     Ilya Dryomov <idryomov@gmail.com>
 M:     "Yan, Zheng" <zyan@redhat.com>
 M:     Sage Weil <sage@redhat.com>
 L:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:     git git://github.com/ceph/ceph-client.git
 S:     Supported
-F:     Documentation/filesystems/ceph.txt
-F:     fs/ceph/
 F:     net/ceph/
 F:     include/linux/ceph/
 F:     include/linux/crush/
 
+CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
+M:     "Yan, Zheng" <zyan@redhat.com>
+M:     Sage Weil <sage@redhat.com>
+M:     Ilya Dryomov <idryomov@gmail.com>
+L:     ceph-devel@vger.kernel.org
+W:     http://ceph.com/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:     git git://github.com/ceph/ceph-client.git
+S:     Supported
+F:     Documentation/filesystems/ceph.txt
+F:     fs/ceph/
+
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:     linux-usb@vger.kernel.org
 S:     Orphan
@@ -6147,6 +6160,7 @@ L:        linux-nvdimm@lists.01.org
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:     Supported
 F:     drivers/nvdimm/pmem.c
+F:     include/linux/pmem.h
 
 LINUX FOR IBM pSERIES (RS/6000)
 M:     Paul Mackerras <paulus@au.ibm.com>
@@ -6161,7 +6175,7 @@ M:        Michael Ellerman <mpe@ellerman.id.au>
 W:     http://www.penguinppc.org/
 L:     linuxppc-dev@lists.ozlabs.org
 Q:     http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
 S:     Supported
 F:     Documentation/powerpc/
 F:     arch/powerpc/
@@ -7005,6 +7019,7 @@ F:        include/uapi/linux/netfilter/
 F:     net/*/netfilter.c
 F:     net/*/netfilter/
 F:     net/netfilter/
+F:     net/bridge/br_netfilter*.c
 
 NETLABEL
 M:     Paul Moore <paul@paul-moore.com>
@@ -8366,10 +8381,12 @@ RADOS BLOCK DEVICE (RBD)
 M:     Ilya Dryomov <idryomov@gmail.com>
 M:     Sage Weil <sage@redhat.com>
 M:     Alex Elder <elder@kernel.org>
-M:     ceph-devel@vger.kernel.org
+L:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:     git git://github.com/ceph/ceph-client.git
 S:     Supported
+F:     Documentation/ABI/testing/sysfs-bus-rbd
 F:     drivers/block/rbd.c
 F:     drivers/block/rbd_types.h
 
index 13270c0a93363f079ccc3a112bfa7d7d80c7f429..257ef5892ab7483c973e9fbcbd2d76e02af34e50 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index a750c1425c3a64e8a1f2d138b8bc4d9fd2224b8e..1c5021002fe40b06a2e57daad459bf1b4181b822 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1693,6 +1693,12 @@ config HIGHMEM
 config HIGHPTE
        bool "Allocate 2nd-level pagetables from highmem"
        depends on HIGHMEM
+       help
+         The VM uses one page of physical memory for each page table.
+         For systems with a lot of processes, this can use a lot of
+         precious low memory, eventually leading to low memory being
+         consumed by page tables.  Setting this option will allow
+         user-space 2nd level page tables to reside in high memory.
 
 config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
index f1b15797136649872ab20aa87b89cdcf667b9a05..a2e16f940394c301cdcbdaa00bbd8488127867f2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
 
 config DEBUG_SET_MODULE_RONX
        bool "Set loadable kernel module data as NX and text as RO"
-       depends on MODULES
+       depends on MODULES && MMU
        ---help---
          This option helps catch unintended modifications to loadable
          kernel module's text and read-only data. It also prevents execution
index 901739fcb85a37abba32822463caea432637d274..5c42d259fa68fbf29c98439306badd1d44987ee0 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,3 +80,7 @@
                status = "okay";
        };
 };
+
+&rtc {
+       system-power-controller;
+};
index c80a3e23379213f67fa92fa7a5ba62f3b44c01c2..ade28c790f4bf6abfbb17de0aaee768e8f324837 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
                        };
                };
 
+               emif: emif@4c000000 {
+                       compatible = "ti,emif-am4372";
+                       reg = <0x4c000000 0x1000000>;
+                       ti,hwmods = "emif";
+               };
+
                edma: edma@49000000 {
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                                ti,hwmods = "dss_rfbi";
                                clocks = <&disp_clk>;
                                clock-names = "fck";
+                               status = "disabled";
                        };
                };
 
index a42cc377a8625c35168d93cbe4fd06430ed3a6ac..a63bf78191ea5a57093e97f1d1fc95d904695816 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
        phy-supply = <&ldousb_reg>;
 };
 
+&usb2_phy2 {
+       phy-supply = <&ldousb_reg>;
+};
+
 &usb1 {
        dr_mode = "host";
        pinctrl-names = "default";
index 5dfd3a44bf82b38c614da737cea42c6498ef6a73..3e21311f9514a65dac7ec6f8de7ff9cc42bac13a 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
                        compatible = "sirf,atlas7-ioc";
                        reg = <0x18880000 0x1000>,
                                <0x10E40000 0x1000>;
+
+                       audio_ac97_pmx: audio_ac97@0 {
+                               audio_ac97 {
+                                       groups = "audio_ac97_grp";
+                                       function = "audio_ac97";
+                               };
+                       };
+
+                       audio_func_dbg_pmx: audio_func_dbg@0 {
+                               audio_func_dbg {
+                                       groups = "audio_func_dbg_grp";
+                                       function = "audio_func_dbg";
+                               };
+                       };
+
+                       audio_i2s_pmx: audio_i2s@0 {
+                               audio_i2s {
+                                       groups = "audio_i2s_grp";
+                                       function = "audio_i2s";
+                               };
+                       };
+
+                       audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
+                               audio_i2s_2ch {
+                                       groups = "audio_i2s_2ch_grp";
+                                       function = "audio_i2s_2ch";
+                               };
+                       };
+
+                       audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
+                               audio_i2s_extclk {
+                                       groups = "audio_i2s_extclk_grp";
+                                       function = "audio_i2s_extclk";
+                               };
+                       };
+
+                       audio_uart0_pmx: audio_uart0@0 {
+                               audio_uart0 {
+                                       groups = "audio_uart0_grp";
+                                       function = "audio_uart0";
+                               };
+                       };
+
+                       audio_uart1_pmx: audio_uart1@0 {
+                               audio_uart1 {
+                                       groups = "audio_uart1_grp";
+                                       function = "audio_uart1";
+                               };
+                       };
+
+                       audio_uart2_pmx0: audio_uart2@0 {
+                               audio_uart2_0 {
+                                       groups = "audio_uart2_grp0";
+                                       function = "audio_uart2_m0";
+                               };
+                       };
+
+                       audio_uart2_pmx1: audio_uart2@1 {
+                               audio_uart2_1 {
+                                       groups = "audio_uart2_grp1";
+                                       function = "audio_uart2_m1";
+                               };
+                       };
+
+                       c_can_trnsvr_pmx: c_can_trnsvr@0 {
+                               c_can_trnsvr {
+                                       groups = "c_can_trnsvr_grp";
+                                       function = "c_can_trnsvr";
+                               };
+                       };
+
+                       c0_can_pmx0: c0_can@0 {
+                               c0_can_0 {
+                                       groups = "c0_can_grp0";
+                                       function = "c0_can_m0";
+                               };
+                       };
+
+                       c0_can_pmx1: c0_can@1 {
+                               c0_can_1 {
+                                       groups = "c0_can_grp1";
+                                       function = "c0_can_m1";
+                               };
+                       };
+
+                       c1_can_pmx0: c1_can@0 {
+                               c1_can_0 {
+                                       groups = "c1_can_grp0";
+                                       function = "c1_can_m0";
+                               };
+                       };
+
+                       c1_can_pmx1: c1_can@1 {
+                               c1_can_1 {
+                                       groups = "c1_can_grp1";
+                                       function = "c1_can_m1";
+                               };
+                       };
+
+                       c1_can_pmx2: c1_can@2 {
+                               c1_can_2 {
+                                       groups = "c1_can_grp2";
+                                       function = "c1_can_m2";
+                               };
+                       };
+
+                       ca_audio_lpc_pmx: ca_audio_lpc@0 {
+                               ca_audio_lpc {
+                                       groups = "ca_audio_lpc_grp";
+                                       function = "ca_audio_lpc";
+                               };
+                       };
+
+                       ca_bt_lpc_pmx: ca_bt_lpc@0 {
+                               ca_bt_lpc {
+                                       groups = "ca_bt_lpc_grp";
+                                       function = "ca_bt_lpc";
+                               };
+                       };
+
+                       ca_coex_pmx: ca_coex@0 {
+                               ca_coex {
+                                       groups = "ca_coex_grp";
+                                       function = "ca_coex";
+                               };
+                       };
+
+                       ca_curator_lpc_pmx: ca_curator_lpc@0 {
+                               ca_curator_lpc {
+                                       groups = "ca_curator_lpc_grp";
+                                       function = "ca_curator_lpc";
+                               };
+                       };
+
+                       ca_pcm_debug_pmx: ca_pcm_debug@0 {
+                               ca_pcm_debug {
+                                       groups = "ca_pcm_debug_grp";
+                                       function = "ca_pcm_debug";
+                               };
+                       };
+
+                       ca_pio_pmx: ca_pio@0 {
+                               ca_pio {
+                                       groups = "ca_pio_grp";
+                                       function = "ca_pio";
+                               };
+                       };
+
+                       ca_sdio_debug_pmx: ca_sdio_debug@0 {
+                               ca_sdio_debug {
+                                       groups = "ca_sdio_debug_grp";
+                                       function = "ca_sdio_debug";
+                               };
+                       };
+
+                       ca_spi_pmx: ca_spi@0 {
+                               ca_spi {
+                                       groups = "ca_spi_grp";
+                                       function = "ca_spi";
+                               };
+                       };
+
+                       ca_trb_pmx: ca_trb@0 {
+                               ca_trb {
+                                       groups = "ca_trb_grp";
+                                       function = "ca_trb";
+                               };
+                       };
+
+                       ca_uart_debug_pmx: ca_uart_debug@0 {
+                               ca_uart_debug {
+                                       groups = "ca_uart_debug_grp";
+                                       function = "ca_uart_debug";
+                               };
+                       };
+
+                       clkc_pmx0: clkc@0 {
+                               clkc_0 {
+                                       groups = "clkc_grp0";
+                                       function = "clkc_m0";
+                               };
+                       };
+
+                       clkc_pmx1: clkc@1 {
+                               clkc_1 {
+                                       groups = "clkc_grp1";
+                                       function = "clkc_m1";
+                               };
+                       };
+
+                       gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
+                               gn_gnss_i2c {
+                                       groups = "gn_gnss_i2c_grp";
+                                       function = "gn_gnss_i2c";
+                               };
+                       };
+
+                       gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
+                               gn_gnss_uart_nopause {
+                                       groups = "gn_gnss_uart_nopause_grp";
+                                       function = "gn_gnss_uart_nopause";
+                               };
+                       };
+
+                       gn_gnss_uart_pmx: gn_gnss_uart@0 {
+                               gn_gnss_uart {
+                                       groups = "gn_gnss_uart_grp";
+                                       function = "gn_gnss_uart";
+                               };
+                       };
+
+                       gn_trg_spi_pmx0: gn_trg_spi@0 {
+                               gn_trg_spi_0 {
+                                       groups = "gn_trg_spi_grp0";
+                                       function = "gn_trg_spi_m0";
+                               };
+                       };
+
+                       gn_trg_spi_pmx1: gn_trg_spi@1 {
+                               gn_trg_spi_1 {
+                                       groups = "gn_trg_spi_grp1";
+                                       function = "gn_trg_spi_m1";
+                               };
+                       };
+
+                       cvbs_dbg_pmx: cvbs_dbg@0 {
+                               cvbs_dbg {
+                                       groups = "cvbs_dbg_grp";
+                                       function = "cvbs_dbg";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
+                               cvbs_dbg_test_0 {
+                                       groups = "cvbs_dbg_test_grp0";
+                                       function = "cvbs_dbg_test_m0";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
+                               cvbs_dbg_test_1 {
+                                       groups = "cvbs_dbg_test_grp1";
+                                       function = "cvbs_dbg_test_m1";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
+                               cvbs_dbg_test_2 {
+                                       groups = "cvbs_dbg_test_grp2";
+                                       function = "cvbs_dbg_test_m2";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
+                               cvbs_dbg_test_3 {
+                                       groups = "cvbs_dbg_test_grp3";
+                                       function = "cvbs_dbg_test_m3";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
+                               cvbs_dbg_test_4 {
+                                       groups = "cvbs_dbg_test_grp4";
+                                       function = "cvbs_dbg_test_m4";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
+                               cvbs_dbg_test_5 {
+                                       groups = "cvbs_dbg_test_grp5";
+                                       function = "cvbs_dbg_test_m5";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
+                               cvbs_dbg_test_6 {
+                                       groups = "cvbs_dbg_test_grp6";
+                                       function = "cvbs_dbg_test_m6";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
+                               cvbs_dbg_test_7 {
+                                       groups = "cvbs_dbg_test_grp7";
+                                       function = "cvbs_dbg_test_m7";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
+                               cvbs_dbg_test_8 {
+                                       groups = "cvbs_dbg_test_grp8";
+                                       function = "cvbs_dbg_test_m8";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
+                               cvbs_dbg_test_9 {
+                                       groups = "cvbs_dbg_test_grp9";
+                                       function = "cvbs_dbg_test_m9";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
+                               cvbs_dbg_test_10 {
+                                       groups = "cvbs_dbg_test_grp10";
+                                       function = "cvbs_dbg_test_m10";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
+                               cvbs_dbg_test_11 {
+                                       groups = "cvbs_dbg_test_grp11";
+                                       function = "cvbs_dbg_test_m11";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
+                               cvbs_dbg_test_12 {
+                                       groups = "cvbs_dbg_test_grp12";
+                                       function = "cvbs_dbg_test_m12";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
+                               cvbs_dbg_test_13 {
+                                       groups = "cvbs_dbg_test_grp13";
+                                       function = "cvbs_dbg_test_m13";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
+                               cvbs_dbg_test_14 {
+                                       groups = "cvbs_dbg_test_grp14";
+                                       function = "cvbs_dbg_test_m14";
+                               };
+                       };
+
+                       cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
+                               cvbs_dbg_test_15 {
+                                       groups = "cvbs_dbg_test_grp15";
+                                       function = "cvbs_dbg_test_m15";
+                               };
+                       };
+
+                       gn_gnss_power_pmx: gn_gnss_power@0 {
+                               gn_gnss_power {
+                                       groups = "gn_gnss_power_grp";
+                                       function = "gn_gnss_power";
+                               };
+                       };
+
+                       gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
+                               gn_gnss_sw_status {
+                                       groups = "gn_gnss_sw_status_grp";
+                                       function = "gn_gnss_sw_status";
+                               };
+                       };
+
+                       gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
+                               gn_gnss_eclk {
+                                       groups = "gn_gnss_eclk_grp";
+                                       function = "gn_gnss_eclk";
+                               };
+                       };
+
+                       gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
+                               gn_gnss_irq1_0 {
+                                       groups = "gn_gnss_irq1_grp0";
+                                       function = "gn_gnss_irq1_m0";
+                               };
+                       };
+
+                       gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
+                               gn_gnss_irq2_0 {
+                                       groups = "gn_gnss_irq2_grp0";
+                                       function = "gn_gnss_irq2_m0";
+                               };
+                       };
+
+                       gn_gnss_tm_pmx: gn_gnss_tm@0 {
+                               gn_gnss_tm {
+                                       groups = "gn_gnss_tm_grp";
+                                       function = "gn_gnss_tm";
+                               };
+                       };
+
+                       gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
+                               gn_gnss_tsync {
+                                       groups = "gn_gnss_tsync_grp";
+                                       function = "gn_gnss_tsync";
+                               };
+                       };
+
+                       gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
+                               gn_io_gnsssys_sw_cfg {
+                                       groups = "gn_io_gnsssys_sw_cfg_grp";
+                                       function = "gn_io_gnsssys_sw_cfg";
+                               };
+                       };
+
+                       gn_trg_pmx0: gn_trg@0 {
+                               gn_trg_0 {
+                                       groups = "gn_trg_grp0";
+                                       function = "gn_trg_m0";
+                               };
+                       };
+
+                       gn_trg_pmx1: gn_trg@1 {
+                               gn_trg_1 {
+                                       groups = "gn_trg_grp1";
+                                       function = "gn_trg_m1";
+                               };
+                       };
+
+                       gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
+                               gn_trg_shutdown_0 {
+                                       groups = "gn_trg_shutdown_grp0";
+                                       function = "gn_trg_shutdown_m0";
+                               };
+                       };
+
+                       gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
+                               gn_trg_shutdown_1 {
+                                       groups = "gn_trg_shutdown_grp1";
+                                       function = "gn_trg_shutdown_m1";
+                               };
+                       };
+
+                       gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
+                               gn_trg_shutdown_2 {
+                                       groups = "gn_trg_shutdown_grp2";
+                                       function = "gn_trg_shutdown_m2";
+                               };
+                       };
+
+                       gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
+                               gn_trg_shutdown_3 {
+                                       groups = "gn_trg_shutdown_grp3";
+                                       function = "gn_trg_shutdown_m3";
+                               };
+                       };
+
+                       i2c0_pmx: i2c0@0 {
+                               i2c0 {
+                                       groups = "i2c0_grp";
+                                       function = "i2c0";
+                               };
+                       };
+
+                       i2c1_pmx: i2c1@0 {
+                               i2c1 {
+                                       groups = "i2c1_grp";
+                                       function = "i2c1";
+                               };
+                       };
+
+                       jtag_pmx0: jtag@0 {
+                               jtag_0 {
+                                       groups = "jtag_grp0";
+                                       function = "jtag_m0";
+                               };
+                       };
+
+                       ks_kas_spi_pmx0: ks_kas_spi@0 {
+                               ks_kas_spi_0 {
+                                       groups = "ks_kas_spi_grp0";
+                                       function = "ks_kas_spi_m0";
+                               };
+                       };
+
+                       ld_ldd_pmx: ld_ldd@0 {
+                               ld_ldd {
+                                       groups = "ld_ldd_grp";
+                                       function = "ld_ldd";
+                               };
+                       };
+
+                       ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
+                               ld_ldd_16bit {
+                                       groups = "ld_ldd_16bit_grp";
+                                       function = "ld_ldd_16bit";
+                               };
+                       };
+
+                       ld_ldd_fck_pmx: ld_ldd_fck@0 {
+                               ld_ldd_fck {
+                                       groups = "ld_ldd_fck_grp";
+                                       function = "ld_ldd_fck";
+                               };
+                       };
+
+                       ld_ldd_lck_pmx: ld_ldd_lck@0 {
+                               ld_ldd_lck {
+                                       groups = "ld_ldd_lck_grp";
+                                       function = "ld_ldd_lck";
+                               };
+                       };
+
+                       lr_lcdrom_pmx: lr_lcdrom@0 {
+                               lr_lcdrom {
+                                       groups = "lr_lcdrom_grp";
+                                       function = "lr_lcdrom";
+                               };
+                       };
+
+                       lvds_analog_pmx: lvds_analog@0 {
+                               lvds_analog {
+                                       groups = "lvds_analog_grp";
+                                       function = "lvds_analog";
+                               };
+                       };
+
+                       nd_df_pmx: nd_df@0 {
+                               nd_df {
+                                       groups = "nd_df_grp";
+                                       function = "nd_df";
+                               };
+                       };
+
+                       nd_df_nowp_pmx: nd_df_nowp@0 {
+                               nd_df_nowp {
+                                       groups = "nd_df_nowp_grp";
+                                       function = "nd_df_nowp";
+                               };
+                       };
+
+                       ps_pmx: ps@0 {
+                               ps {
+                                       groups = "ps_grp";
+                                       function = "ps";
+                               };
+                       };
+
+                       pwc_core_on_pmx: pwc_core_on@0 {
+                               pwc_core_on {
+                                       groups = "pwc_core_on_grp";
+                                       function = "pwc_core_on";
+                               };
+                       };
+
+                       pwc_ext_on_pmx: pwc_ext_on@0 {
+                               pwc_ext_on {
+                                       groups = "pwc_ext_on_grp";
+                                       function = "pwc_ext_on";
+                               };
+                       };
+
+                       pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
+                               pwc_gpio3_clk {
+                                       groups = "pwc_gpio3_clk_grp";
+                                       function = "pwc_gpio3_clk";
+                               };
+                       };
+
+                       pwc_io_on_pmx: pwc_io_on@0 {
+                               pwc_io_on {
+                                       groups = "pwc_io_on_grp";
+                                       function = "pwc_io_on";
+                               };
+                       };
+
+                       pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
+                               pwc_lowbatt_b_0 {
+                                       groups = "pwc_lowbatt_b_grp0";
+                                       function = "pwc_lowbatt_b_m0";
+                               };
+                       };
+
+                       pwc_mem_on_pmx: pwc_mem_on@0 {
+                               pwc_mem_on {
+                                       groups = "pwc_mem_on_grp";
+                                       function = "pwc_mem_on";
+                               };
+                       };
+
+                       pwc_on_key_b_pmx0: pwc_on_key_b@0 {
+                               pwc_on_key_b_0 {
+                                       groups = "pwc_on_key_b_grp0";
+                                       function = "pwc_on_key_b_m0";
+                               };
+                       };
+
+                       pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
+                               pwc_wakeup_src0 {
+                                       groups = "pwc_wakeup_src0_grp";
+                                       function = "pwc_wakeup_src0";
+                               };
+                       };
+
+                       pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
+                               pwc_wakeup_src1 {
+                                       groups = "pwc_wakeup_src1_grp";
+                                       function = "pwc_wakeup_src1";
+                               };
+                       };
+
+                       pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
+                               pwc_wakeup_src2 {
+                                       groups = "pwc_wakeup_src2_grp";
+                                       function = "pwc_wakeup_src2";
+                               };
+                       };
+
+                       pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
+                               pwc_wakeup_src3 {
+                                       groups = "pwc_wakeup_src3_grp";
+                                       function = "pwc_wakeup_src3";
+                               };
+                       };
+
+                       pw_cko0_pmx0: pw_cko0@0 {
+                               pw_cko0_0 {
+                                       groups = "pw_cko0_grp0";
+                                       function = "pw_cko0_m0";
+                               };
+                       };
+
+                       pw_cko0_pmx1: pw_cko0@1 {
+                               pw_cko0_1 {
+                                       groups = "pw_cko0_grp1";
+                                       function = "pw_cko0_m1";
+                               };
+                       };
+
+                       pw_cko0_pmx2: pw_cko0@2 {
+                               pw_cko0_2 {
+                                       groups = "pw_cko0_grp2";
+                                       function = "pw_cko0_m2";
+                               };
+                       };
+
+                       pw_cko1_pmx0: pw_cko1@0 {
+                               pw_cko1_0 {
+                                       groups = "pw_cko1_grp0";
+                                       function = "pw_cko1_m0";
+                               };
+                       };
+
+                       pw_cko1_pmx1: pw_cko1@1 {
+                               pw_cko1_1 {
+                                       groups = "pw_cko1_grp1";
+                                       function = "pw_cko1_m1";
+                               };
+                       };
+
+                       pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
+                               pw_i2s01_clk_0 {
+                                       groups = "pw_i2s01_clk_grp0";
+                                       function = "pw_i2s01_clk_m0";
+                               };
+                       };
+
+                       pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
+                               pw_i2s01_clk_1 {
+                                       groups = "pw_i2s01_clk_grp1";
+                                       function = "pw_i2s01_clk_m1";
+                               };
+                       };
+
+                       pw_pwm0_pmx: pw_pwm0@0 {
+                               pw_pwm0 {
+                                       groups = "pw_pwm0_grp";
+                                       function = "pw_pwm0";
+                               };
+                       };
+
+                       pw_pwm1_pmx: pw_pwm1@0 {
+                               pw_pwm1 {
+                                       groups = "pw_pwm1_grp";
+                                       function = "pw_pwm1";
+                               };
+                       };
+
+                       pw_pwm2_pmx0: pw_pwm2@0 {
+                               pw_pwm2_0 {
+                                       groups = "pw_pwm2_grp0";
+                                       function = "pw_pwm2_m0";
+                               };
+                       };
+
+                       pw_pwm2_pmx1: pw_pwm2@1 {
+                               pw_pwm2_1 {
+                                       groups = "pw_pwm2_grp1";
+                                       function = "pw_pwm2_m1";
+                               };
+                       };
+
+                       pw_pwm3_pmx0: pw_pwm3@0 {
+                               pw_pwm3_0 {
+                                       groups = "pw_pwm3_grp0";
+                                       function = "pw_pwm3_m0";
+                               };
+                       };
+
+                       pw_pwm3_pmx1: pw_pwm3@1 {
+                               pw_pwm3_1 {
+                                       groups = "pw_pwm3_grp1";
+                                       function = "pw_pwm3_m1";
+                               };
+                       };
+
+                       pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
+                               pw_pwm_cpu_vol_0 {
+                                       groups = "pw_pwm_cpu_vol_grp0";
+                                       function = "pw_pwm_cpu_vol_m0";
+                               };
+                       };
+
+                       pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
+                               pw_pwm_cpu_vol_1 {
+                                       groups = "pw_pwm_cpu_vol_grp1";
+                                       function = "pw_pwm_cpu_vol_m1";
+                               };
+                       };
+
+                       pw_backlight_pmx0: pw_backlight@0 {
+                               pw_backlight_0 {
+                                       groups = "pw_backlight_grp0";
+                                       function = "pw_backlight_m0";
+                               };
+                       };
+
+                       pw_backlight_pmx1: pw_backlight@1 {
+                               pw_backlight_1 {
+                                       groups = "pw_backlight_grp1";
+                                       function = "pw_backlight_m1";
+                               };
+                       };
+
+                       rg_eth_mac_pmx: rg_eth_mac@0 {
+                               rg_eth_mac {
+                                       groups = "rg_eth_mac_grp";
+                                       function = "rg_eth_mac";
+                               };
+                       };
+
+                       rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
+                               rg_gmac_phy_intr_n {
+                                       groups = "rg_gmac_phy_intr_n_grp";
+                                       function = "rg_gmac_phy_intr_n";
+                               };
+                       };
+
+                       rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
+                               rg_rgmii_mac {
+                                       groups = "rg_rgmii_mac_grp";
+                                       function = "rg_rgmii_mac";
+                               };
+                       };
+
+                       rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
+                               rg_rgmii_phy_ref_clk_0 {
+                                       groups =
+                                               "rg_rgmii_phy_ref_clk_grp0";
+                                       function =
+                                               "rg_rgmii_phy_ref_clk_m0";
+                               };
+                       };
+
+                       rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
+                               rg_rgmii_phy_ref_clk_1 {
+                                       groups =
+                                               "rg_rgmii_phy_ref_clk_grp1";
+                                       function =
+                                               "rg_rgmii_phy_ref_clk_m1";
+                               };
+                       };
+
+                       sd0_pmx: sd0@0 {
+                               sd0 {
+                                       groups = "sd0_grp";
+                                       function = "sd0";
+                               };
+                       };
+
+                       sd0_4bit_pmx: sd0_4bit@0 {
+                               sd0_4bit {
+                                       groups = "sd0_4bit_grp";
+                                       function = "sd0_4bit";
+                               };
+                       };
+
+                       sd1_pmx: sd1@0 {
+                               sd1 {
+                                       groups = "sd1_grp";
+                                       function = "sd1";
+                               };
+                       };
+
+                       sd1_4bit_pmx0: sd1_4bit@0 {
+                               sd1_4bit_0 {
+                                       groups = "sd1_4bit_grp0";
+                                       function = "sd1_4bit_m0";
+                               };
+                       };
+
+                       sd1_4bit_pmx1: sd1_4bit@1 {
+                               sd1_4bit_1 {
+                                       groups = "sd1_4bit_grp1";
+                                       function = "sd1_4bit_m1";
+                               };
+                       };
+
+                       sd2_pmx0: sd2@0 {
+                               sd2_0 {
+                                       groups = "sd2_grp0";
+                                       function = "sd2_m0";
+                               };
+                       };
+
+                       sd2_no_cdb_pmx0: sd2_no_cdb@0 {
+                               sd2_no_cdb_0 {
+                                       groups = "sd2_no_cdb_grp0";
+                                       function = "sd2_no_cdb_m0";
+                               };
+                       };
+
+                       sd3_pmx: sd3@0 {
+                               sd3 {
+                                       groups = "sd3_grp";
+                                       function = "sd3";
+                               };
+                       };
+
+                       sd5_pmx: sd5@0 {
+                               sd5 {
+                                       groups = "sd5_grp";
+                                       function = "sd5";
+                               };
+                       };
+
+                       sd6_pmx0: sd6@0 {
+                               sd6_0 {
+                                       groups = "sd6_grp0";
+                                       function = "sd6_m0";
+                               };
+                       };
+
+                       sd6_pmx1: sd6@1 {
+                               sd6_1 {
+                                       groups = "sd6_grp1";
+                                       function = "sd6_m1";
+                               };
+                       };
+
+                       sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
+                               sp0_ext_ldo_on {
+                                       groups = "sp0_ext_ldo_on_grp";
+                                       function = "sp0_ext_ldo_on";
+                               };
+                       };
+
+                       sp0_qspi_pmx: sp0_qspi@0 {
+                               sp0_qspi {
+                                       groups = "sp0_qspi_grp";
+                                       function = "sp0_qspi";
+                               };
+                       };
+
+                       sp1_spi_pmx: sp1_spi@0 {
+                               sp1_spi {
+                                       groups = "sp1_spi_grp";
+                                       function = "sp1_spi";
+                               };
+                       };
+
+                       tpiu_trace_pmx: tpiu_trace@0 {
+                               tpiu_trace {
+                                       groups = "tpiu_trace_grp";
+                                       function = "tpiu_trace";
+                               };
+                       };
+
+                       uart0_pmx: uart0@0 {
+                               uart0 {
+                                       groups = "uart0_grp";
+                                       function = "uart0";
+                               };
+                       };
+
+                       uart0_nopause_pmx: uart0_nopause@0 {
+                               uart0_nopause {
+                                       groups = "uart0_nopause_grp";
+                                       function = "uart0_nopause";
+                               };
+                       };
+
+                       uart1_pmx: uart1@0 {
+                               uart1 {
+                                       groups = "uart1_grp";
+                                       function = "uart1";
+                               };
+                       };
+
+                       uart2_pmx: uart2@0 {
+                               uart2 {
+                                       groups = "uart2_grp";
+                                       function = "uart2";
+                               };
+                       };
+
+                       uart3_pmx0: uart3@0 {
+                               uart3_0 {
+                                       groups = "uart3_grp0";
+                                       function = "uart3_m0";
+                               };
+                       };
+
+                       uart3_pmx1: uart3@1 {
+                               uart3_1 {
+                                       groups = "uart3_grp1";
+                                       function = "uart3_m1";
+                               };
+                       };
+
+                       uart3_pmx2: uart3@2 {
+                               uart3_2 {
+                                       groups = "uart3_grp2";
+                                       function = "uart3_m2";
+                               };
+                       };
+
+                       uart3_pmx3: uart3@3 {
+                               uart3_3 {
+                                       groups = "uart3_grp3";
+                                       function = "uart3_m3";
+                               };
+                       };
+
+                       uart3_nopause_pmx0: uart3_nopause@0 {
+                               uart3_nopause_0 {
+                                       groups = "uart3_nopause_grp0";
+                                       function = "uart3_nopause_m0";
+                               };
+                       };
+
+                       uart3_nopause_pmx1: uart3_nopause@1 {
+                               uart3_nopause_1 {
+                                       groups = "uart3_nopause_grp1";
+                                       function = "uart3_nopause_m1";
+                               };
+                       };
+
+                       uart4_pmx0: uart4@0 {
+                               uart4_0 {
+                                       groups = "uart4_grp0";
+                                       function = "uart4_m0";
+                               };
+                       };
+
+                       uart4_pmx1: uart4@1 {
+                               uart4_1 {
+                                       groups = "uart4_grp1";
+                                       function = "uart4_m1";
+                               };
+                       };
+
+                       uart4_pmx2: uart4@2 {
+                               uart4_2 {
+                                       groups = "uart4_grp2";
+                                       function = "uart4_m2";
+                               };
+                       };
+
+                       uart4_nopause_pmx: uart4_nopause@0 {
+                               uart4_nopause {
+                                       groups = "uart4_nopause_grp";
+                                       function = "uart4_nopause";
+                               };
+                       };
+
+                       usb0_drvvbus_pmx: usb0_drvvbus@0 {
+                               usb0_drvvbus {
+                                       groups = "usb0_drvvbus_grp";
+                                       function = "usb0_drvvbus";
+                               };
+                       };
+
+                       usb1_drvvbus_pmx: usb1_drvvbus@0 {
+                               usb1_drvvbus {
+                                       groups = "usb1_drvvbus_grp";
+                                       function = "usb1_drvvbus";
+                               };
+                       };
+
+                       visbus_dout_pmx: visbus_dout@0 {
+                               visbus_dout {
+                                       groups = "visbus_dout_grp";
+                                       function = "visbus_dout";
+                               };
+                       };
+
+                       vi_vip1_pmx: vi_vip1@0 {
+                               vi_vip1 {
+                                       groups = "vi_vip1_grp";
+                                       function = "vi_vip1";
+                               };
+                       };
+
+                       vi_vip1_ext_pmx: vi_vip1_ext@0 {
+                               vi_vip1_ext {
+                                       groups = "vi_vip1_ext_grp";
+                                       function = "vi_vip1_ext";
+                               };
+                       };
+
+                       vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
+                               vi_vip1_low8bit {
+                                       groups = "vi_vip1_low8bit_grp";
+                                       function = "vi_vip1_low8bit";
+                               };
+                       };
+
+                       vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
+                               vi_vip1_high8bit {
+                                       groups = "vi_vip1_high8bit_grp";
+                                       function = "vi_vip1_high8bit";
+                               };
+                       };
                };
 
                pmipc {
                                clock-names = "gpio0_io";
                                gpio-controller;
                                interrupt-controller;
+
+                               gpio-banks = <2>;
+                               gpio-ranges = <&pinctrl 0 0 0>,
+                                               <&pinctrl 32 0 0>;
+                               gpio-ranges-group-names = "lvds_gpio_grp",
+                                                       "uart_nand_gpio_grp";
                        };
 
                        nand@17050000 {
                                #interrupt-cells = <2>;
                                compatible = "sirf,atlas7-gpio";
                                reg = <0x13300000 0x1000>;
-                               interrupts = <0 43 0>, <0 44 0>, <0 45 0>;
+                               interrupts = <0 43 0>, <0 44 0>,
+                                               <0 45 0>, <0 46 0>;
                                clocks = <&car 84>;
                                clock-names = "gpio1_io";
                                gpio-controller;
                                interrupt-controller;
+
+                               gpio-banks = <4>;
+                               gpio-ranges = <&pinctrl 0 0 0>,
+                                               <&pinctrl 32 0 0>,
+                                               <&pinctrl 64 0 0>,
+                                               <&pinctrl 96 0 0>;
+                               gpio-ranges-group-names = "gnss_gpio_grp",
+                                                       "lcd_vip_gpio_grp",
+                                                       "sdio_i2s_gpio_grp",
+                                                       "sp_rgmii_gpio_grp";
                        };
 
                        sd2: sdhci@14200000 {
                                interrupts = <0 47 0>;
                                gpio-controller;
                                interrupt-controller;
+
+                               gpio-banks = <1>;
+                               gpio-ranges = <&pinctrl 0 0 0>;
+                               gpio-ranges-group-names = "rtc_gpio_grp";
                        };
 
                        rtc-iobg@18840000 {
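
A note on the gpio-ranges entries just added: a pin count of 0 in the GPIO DT binding means the range is resolved by pin-group name through gpio-ranges-group-names, one group per bank. A minimal driver-side sketch of the equivalent registration, assuming hypothetical chip/pctldev objects for the first (two-bank) controller:

    #include <linux/gpio/driver.h>

    /* Sketch only: "chip" and "pctldev" stand in for the real atlas7
     * driver state; the group names come from the DT above. */
    static int a7gpio_add_bank_ranges(struct gpio_chip *chip,
                                      struct pinctrl_dev *pctldev)
    {
            int ret;

            /* bank 0: GPIOs 0..31 follow the "lvds_gpio_grp" pin group */
            ret = gpiochip_add_pingroup_range(chip, pctldev, 0,
                                              "lvds_gpio_grp");
            if (ret)
                    return ret;

            /* bank 1: GPIOs 32..63 follow "uart_nand_gpio_grp" */
            return gpiochip_add_pingroup_range(chip, pctldev, 32,
                                               "uart_nand_gpio_grp");
    }
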
index aa465904f6cc420ecad93f006bc5e622d8e70b36..096f68be99e2b962a9b9ee86a22a978509477aad 100644 (file)
 
 &dcan1 {
        status = "ok";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&dcan1_pins_default>;
+       pinctrl-names = "default", "sleep", "active";
+       pinctrl-0 = <&dcan1_pins_sleep>;
        pinctrl-1 = <&dcan1_pins_sleep>;
+       pinctrl-2 = <&dcan1_pins_default>;
 };
index 4e1b60581782c43fc40c9b167e45f90e64024068..803738414086a76f80a8e669142b2caaaa400092 100644 (file)
 
 &dcan1 {
        status = "ok";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&dcan1_pins_default>;
+       pinctrl-names = "default", "sleep", "active";
+       pinctrl-0 = <&dcan1_pins_sleep>;
        pinctrl-1 = <&dcan1_pins_sleep>;
+       pinctrl-2 = <&dcan1_pins_default>;
 };
 
 &qspi {
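
Both boards now park the DCAN1 pins in their sleep configuration from probe onwards ("default" points at the sleep pins) and expose the wired-up configuration as a third state, "active", for the driver to select when the interface comes up. A minimal consumer-side sketch, assuming the standard pinctrl consumer API and a hypothetical helper in the DCAN driver:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    static int dcan_pins_activate(struct device *dev)
    {
            struct pinctrl *p;
            struct pinctrl_state *active;

            /* the driver core has already applied "default" (sleep pins) */
            p = devm_pinctrl_get(dev);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            active = pinctrl_lookup_state(p, "active");
            if (IS_ERR(active))
                    return PTR_ERR(active);

            /* hand the pins to the DCAN module when the link comes up */
            return pinctrl_select_state(p, active);
    }
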
index 107395c32d8265863fecb711311def376ab500b6..17f63f7dfd9ed8f3fbe7d4f6365e19c611700ae7 100644 (file)
                        interface-type = "ace";
                        reg = <0x5000 0x1000>;
                };
+
+               pmu@9000 {
+                       compatible = "arm,cci-400-pmu,r0";
+                       reg = <0x9000 0x5000>;
+                       interrupts = <0 105 4>,
+                                    <0 101 4>,
+                                    <0 102 4>,
+                                    <0 103 4>,
+                                    <0 104 4>;
+               };
        };
 
        memory-controller@7ffd0000 {
                             <1 10 0xf08>;
        };
 
-       pmu {
+       pmu_a15 {
                compatible = "arm,cortex-a15-pmu";
                interrupts = <0 68 4>,
                             <0 69 4>;
-               interrupt-affinity = <&cpu0>, <&cpu1>;
+               interrupt-affinity = <&cpu0>,
+                                    <&cpu1>;
+       };
+
+       pmu_a7 {
+               compatible = "arm,cortex-a7-pmu";
+               interrupts = <0 128 4>,
+                            <0 129 4>,
+                            <0 130 4>;
+               interrupt-affinity = <&cpu2>,
+                                    <&cpu3>,
+                                    <&cpu4>;
        };
 
        oscclk6a: oscclk6a {
index 6d83a1bf0c7494593eb608ddba29f2bfea8d8455..5fd8df6f50ea0c4f9d86cbf0b95364c159c17fb1 100644 (file)
@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
index 8ecba00dcd83def3a2ef3abc86d71e1aec942b0f..7ebc346bf9fa56e2dad3ae90914012f0928019b3 100644 (file)
@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
 CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
index 1c3938f26beba9837d968be7f75d08dfa2b13136..485982084fe96aef7218aeb134527b41f90beb1e 100644 (file)
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmallocinfo to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-       size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
        void *);
-
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
        unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
        size_t count)
 {
-       memset((void __force *)dst, c, count);
+       extern void mmioset(void *, unsigned int, size_t);
+       mmioset((void __force *)dst, c, count);
 }
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
        size_t count)
 {
-       memcpy(to, (const void __force *)from, count);
+       extern void mmiocpy(void *, const void *, size_t);
+       mmiocpy(to, (const void __force *)from, count);
 }
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
        size_t count)
 {
-       memcpy((void __force *)to, from, count);
+       extern void mmiocpy(void *, const void *, size_t);
+       mmiocpy((void __force *)to, from, count);
 }
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
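
With the three accessors above routed through mmiocpy()/mmioset(), which are assembler aliases of memcpy/memset added further down in arch/arm/lib, the compiler can no longer substitute an inlined memcpy/memset whose access sizes and alignment are unconstrained and therefore unsafe on Device-type mappings. A hedged usage sketch, with a made-up register block and descriptor size:

    #include <linux/io.h>

    /* Sketch only: offsets, the 64-byte descriptor and the doorbell
     * register are hypothetical. */
    static void push_desc(void __iomem *regs, const void *desc)
    {
            memcpy_toio(regs + 0x100, desc, 64);    /* device-safe copy */
            writel(1, regs);                        /* ring the doorbell */
    }
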
 
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif /* readl */
 
 /*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function            Memory type     Cacheability    Cache hint
+ * ioremap()           Device          n/a             n/a
+ * ioremap_nocache()   Device          n/a             n/a
+ * ioremap_cache()     Normal          Writeback       Read allocate
+ * ioremap_wc()        Normal          Non-cacheable   n/a
+ * ioremap_wt()        Normal          Non-cacheable   n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
  *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap(): too many device drivers use
+ * it for device registers, and too much documentation tells people to use
+ * it that way, for it to behave any differently.  It is not a safe
+ * fallback for memory-like mappings, or for memory regions where the
+ * compiler may generate unaligned accesses - eg, by inlining its own
+ * memcpy.
  *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
-#define ioremap(cookie,size)           __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)   __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)     __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap                                __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
 
 /*
  * io{read,write}{16,32}be() macros
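
With the declarations above, the memory type is selected by calling the matching function rather than by passing an MT_* constant to the removed __arm_ioremap(). A minimal sketch of the table in use; the physical addresses and sizes are hypothetical:

    #include <linux/errno.h>
    #include <linux/io.h>

    static int map_example(void)
    {
            void __iomem *regs, *fb;

            regs = ioremap(0x40000000, 0x1000);     /* Device type: registers */
            if (!regs)
                    return -ENOMEM;

            fb = ioremap_wc(0x50000000, 0x100000);  /* Normal, non-cacheable */
            if (!fb) {
                    iounmap(regs);
                    return -ENOMEM;
            }

            /* ... talk to the device ... */

            iounmap(fb);
            iounmap(regs);
            return 0;
    }
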
index 3a72d69b3255e8fa0d53c41ae4df90c126777020..6f225acc07c56bdef12a3ca68ae526d8fe836bf4 100644 (file)
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)      __va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 
index bfd662e49a25efb04b1929f7dba98a0befe3e684..aeddd28b3595515aa70fb849bd65179a7b800443 100644 (file)
 
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * pre-ARMv6 CPUs' cacheable and bufferable bits: n/a,n/a,C,B
+ * On ARMv6+ without TEX remapping, they are a table index.
+ * On ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type             Pre-ARMv6       ARMv6+ type / cacheable status
+ * UNCACHED            Uncached        Strongly ordered
+ * BUFFERABLE          Bufferable      Normal memory / non-cacheable
+ * WRITETHROUGH        Writethrough    Normal memory / write through
+ * WRITEBACK           Writeback       Normal memory / write back, read alloc
+ * MINICACHE           Minicache       N/A
+ * WRITEALLOC          Writeback       Normal memory / write back, write alloc
+ * DEV_SHARED          Uncached        Device memory (shared)
+ * DEV_NONSHARED       Uncached        Device memory (non-shared)
+ * DEV_WC              Bufferable      Normal memory / non-cacheable
+ * DEV_CACHED          Writeback       Normal memory / write back, read alloc
+ * VECTORS             Variable        Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED      (_AT(pteval_t, 0x00) << 2)      /* 0000 */
 #define L_PTE_MT_BUFFERABLE    (_AT(pteval_t, 0x01) << 2)      /* 0001 */
index a88671cfe1ffb1e1ee43b06a7c8c6fd704f38580..5e5a51a99e68ec77b38b8101611bc853eb8496af 100644 (file)
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
 
 extern void fpundefinstr(void);
 
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
        /* platform dependent support */
 EXPORT_SYMBOL(arm_delay_ops);
 
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
index 7dac3086e361c8e3680d5209e5b72184c9c48cdb..cb4fb1e69778603d41356f3ed7a98695f4cc0cdb 100644 (file)
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
        zero_fp
 
        .if     \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
 #endif
        ct_user_exit save = 0
index 90dfbedfbfb8527b74ddcfcef125beb151f875f5..3d6b7821cff8c952e73c72cad5a1f8d90cdf1758 100644 (file)
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
-               trace_ipi_entry(ipi_types[ipinr]);
+               trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }
 
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        }
 
        if ((unsigned)ipinr < NR_IPI)
-               trace_ipi_exit(ipi_types[ipinr]);
+               trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }
 
index 7797e81e40e00f72a85ba90dc5abcc1227504b44..64111bd4440b1aa3702c469ce349b303a0244ebd 100644 (file)
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
+ENTRY(mmiocpy)
 ENTRY(memcpy)
 
 #include "copy_template.S"
 
 ENDPROC(memcpy)
+ENDPROC(mmiocpy)
index a4ee97b5a2bfcfd3099353b44e8678c4df5bff6b..3c65e3bd790fe1f7aec59d9568cb4c0799be0e9a 100644 (file)
@@ -16,6 +16,7 @@
        .text
        .align  5
 
+ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart         )
        ands    r3, r0, #3              @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart            )
        b       1b
 UNWIND( .fnend   )
 ENDPROC(memset)
+ENDPROC(mmioset)
index e1a56d87599e640620e506989de7b5f302c49695..1ed4be184a29b16f6f8d97a082fb80d424ca0665 100644 (file)
@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
        u8 revision = dma_read(REVISION, 0) & 0xff;
        printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
                                revision >> 4, revision & 0xf);
-       return;
 }
 
 static unsigned configure_dma_errata(void)
index e03d8b5c9ad0aa174b46c2e54cf2ade518d4cfdc..9ab8932403e5dede46f37dabfd5d8658b01a405e 100644 (file)
@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
        select ARCH_REQUIRE_GPIOLIB
        select GENERIC_IRQ_CHIP
        select NO_IOPORT_MAP
+       select REGMAP
        select PINCTRL
        select PINCTRL_SIRF
        help
index 8f66d8f7ca75e5d12ccbfb14b5f632918b722edb..d4852d24dc7d44010c400ba9425847f96c81eee8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
+ * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
  * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
  *
  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/regmap.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
 {
        unsigned long flags, val;
 
+       /* TODO: add hwspinlock to sync with M3 */
        spin_lock_irqsave(&rtciobrg_lock, flags);
 
        val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
 {
        unsigned long flags;
 
+       /* TODO: add hwspinlock to sync with M3 */
        spin_lock_irqsave(&rtciobrg_lock, flags);
 
        sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
 }
 EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
 
+
+static int regmap_iobg_regwrite(void *context, unsigned int reg,
+                                  unsigned int val)
+{
+       sirfsoc_rtc_iobrg_writel(val, reg);
+       return 0;
+}
+
+static int regmap_iobg_regread(void *context, unsigned int reg,
+                                 unsigned int *val)
+{
+       *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
+       return 0;
+}
+
+static struct regmap_bus regmap_iobg = {
+       .reg_write = regmap_iobg_regwrite,
+       .reg_read = regmap_iobg_regread,
+};
+
+/**
+ * devm_regmap_init_iobg() - Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+                                   const struct regmap_config *config)
+{
+       const struct regmap_bus *bus = &regmap_iobg;
+
+       return devm_regmap_init(dev, bus, dev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
+
 static const struct of_device_id rtciobrg_ids[] = {
        { .compatible = "sirf,prima2-rtciobg" },
        {}
@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
 }
 postcore_initcall(sirfsoc_rtciobrg_init);
 
-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "
-               "Barry Song <baohua.song@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
 MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
 MODULE_LICENSE("GPL v2");
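
A hedged consumer sketch for the new bridge regmap; the regmap_config values and the register offset are illustrative assumptions, and devm_regmap_init_iobg() is the helper added above:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regmap.h>

    static int sysrtc_probe(struct device *dev)
    {
            static const struct regmap_config cfg = {
                    .reg_bits = 32, /* iobrg register addresses are 32-bit */
                    .val_bits = 32, /* ...and so are the values */
            };
            struct regmap *map;
            unsigned int ver;

            map = devm_regmap_init_iobg(dev, &cfg);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            /* read a (hypothetical) version register through the bridge */
            return regmap_read(map, 0x0, &ver);
    }
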
index 81502b90dd9130240bd716d4bfb866b9d5ac5efe..4efe2d43a126b4ca13c42aa08fc67ce11c898d35 100644 (file)
@@ -35,7 +35,7 @@ config MACH_SUN7I
        select SUN5I_HSTIMER
 
 config MACH_SUN8I
-       bool "Allwinner A23 (sun8i) SoCs support"
+       bool "Allwinner sun8i Family SoCs support"
        default ARCH_SUNXI
        select ARM_GIC
        select MFD_SUN6I_PRCM
index 1bc811a74a9f4b23c5b78822c5ad6e19a432857b..65bab2876343aff24707105f0432c90faaf90e60 100644 (file)
@@ -67,10 +67,13 @@ MACHINE_END
 
 static const char * const sun8i_board_dt_compat[] = {
        "allwinner,sun8i-a23",
+       "allwinner,sun8i-a33",
+       "allwinner,sun8i-h3",
        NULL,
 };
 
-DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family")
+DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
+       .init_time      = sun6i_timer_init,
        .dt_compat      = sun8i_board_dt_compat,
        .init_late      = sunxi_dt_cpufreq_init,
 MACHINE_END
index d1e5ad7ab3bc6ae9b3a538a3601bccf929b0e998..0c81056c1dd7edac30e1d86bdd2103d1e2d6832f 100644 (file)
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
        const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
 {
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
-                       __builtin_return_address(0));
+                                       __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;
 
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-       return arch_ioremap_caller(phys_addr, size, mtype,
-               __builtin_return_address(0));
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                  __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
 
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
 {
-       arch_iounmap(io_addr);
+       arch_iounmap(cookie);
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
 
 #ifdef CONFIG_PCI
 static int pci_ioremap_mem_type = MT_DEVICE;
index 6ca7d9aa896f4e14804e1096407b16e9b1020456..870838a46d524141a9cb1d65980c1ba7456465cb 100644 (file)
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
        int highmem = 0;
        phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
        struct memblock_region *reg;
+       bool should_use_highmem = false;
 
        for_each_memblock(memory, reg) {
                phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
                                pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
                                          &block_start, &block_end);
                                memblock_remove(reg->base, reg->size);
+                               should_use_highmem = true;
                                continue;
                        }
 
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
                                          &block_start, &block_end, &vmalloc_limit);
                                memblock_remove(vmalloc_limit, overlap_size);
                                block_end = vmalloc_limit;
+                               should_use_highmem = true;
                        }
                }
 
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
                }
        }
 
+       if (should_use_highmem)
+               pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
        high_memory = __va(arm_lowmem_limit - 1) + 1;
 
        /*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
+       memblock_set_current_limit(arm_lowmem_limit);
        dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
index afd7e05d95f1884becf11913aa4b075048a4d4f8..1dd10936d68d0422b328128fbb6a234d77adee1d 100644 (file)
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
-                          size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                                  unsigned int mtype, void *caller)
 {
-       return __arm_ioremap_pfn(pfn, offset, size, mtype);
+       return (void __iomem *)phys_addr;
 }
 
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
-                           unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 {
-       return (void __iomem *)phys_addr;
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
 
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
-                                  unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
 {
-       return __arm_ioremap(phys_addr, size, mtype);
 }
+EXPORT_SYMBOL(__iounmap);
 
 void (*arch_iounmap)(volatile void __iomem *);
 
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
index 9005b07296c8b38c728ff6466ebd365fda1753ed..aedec81d11988181e3e6f0631113fd01e4737ac8 100644 (file)
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+       va_list ap;
+
+       failed = 1;
+       fprintf(stderr, "%s: ", argv0);
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-       if (error_message_count > 0 && outfile != NULL)
+       if (failed && outfile != NULL)
                unlink(outfile);
 }
 
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
        int infd;
 
        atexit(cleanup);
+       argv0 = argv[0];
 
        if (argc != 3)
-               error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+               fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
        infile = argv[1];
        outfile = argv[2];
 
        infd = open(infile, O_RDONLY);
        if (infd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+               fail("Cannot open %s: %s\n", infile, strerror(errno));
 
        if (fstat(infd, &stat) != 0)
-               error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+               fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
        inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
        if (inbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+               fail("Failed to map %s: %s\n", infile, strerror(errno));
 
        close(infd);
 
        inhdr = inbuf;
 
        if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-               error(EXIT_FAILURE, 0, "Not an ELF file");
+               fail("Not an ELF file\n");
 
        if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-               error(EXIT_FAILURE, 0, "Unsupported ELF class");
+               fail("Unsupported ELF class\n");
 
        swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
        if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-               error(EXIT_FAILURE, 0, "Not a shared object");
+               fail("Not a shared object\n");
 
-       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-               error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-                     inhdr->e_machine);
-       }
+       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+               fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
        e_flags = read_elf_word(inhdr->e_flags, swap);
 
        if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-               error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-                     EF_ARM_EABI_VERSION(e_flags));
+               fail("Unsupported EABI version %#x\n",
+                    EF_ARM_EABI_VERSION(e_flags));
        }
 
        if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-               error(EXIT_FAILURE, 0,
-                     "Unexpected hard-float flag set in e_flags");
+               fail("Unexpected hard-float flag set in e_flags\n");
 
        clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
        outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
        if (outfd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+               fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
        if (ftruncate(outfd, stat.st_size) != 0)
-               error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+               fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
        outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      outfd, 0);
        if (outbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+               fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
        close(outfd);
 
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
        }
 
        if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-               error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+               fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
        return EXIT_SUCCESS;
 }
index 0f6edb14b7e436bc60fee0908e3ff7a45cd3ff41..318175f62c24d0ec4df68a555d02f2bd2a920cfc 100644 (file)
@@ -23,9 +23,9 @@ config ARM64
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
-       select EDAC_SUPPORT
        select CPU_PM if (SUSPEND || CPU_IDLE)
        select DCACHE_WORD_ACCESS
+       select EDAC_SUPPORT
        select GENERIC_ALLOCATOR
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
index 83578e766b945ae2c35b41b93813a8c29ec15c1b..4c55833d8a41a0b361faaf66ad72dffcc6885f02 100644 (file)
                device_type = "memory";
                reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
        };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+               button@1 {
+                       label = "POWER";
+                       linux,code = <116>;
+                       linux,input-type = <0x1>;
+                       interrupts = <0x0 0x2d 0x1>;
+               };
+       };
 };
 
 &pcie0clk {
index c5c98b91514e90186e6bbe432a1c0a7d880ed9e9..bb3c072096760836ad004b1e9bd485ecfaeb9b24 100644 (file)
@@ -1,6 +1,7 @@
 dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb
 dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb
 dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
 
 always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
new file mode 100644 (file)
index 0000000..5b1d018
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ * LogicTile Express 20MG
+ * V2F-1XV7
+ *
+ * Cortex-A53 (2 cores) Soft Macrocell Model
+ *
+ * HBI-0247C
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+       model = "V2F-1XV7 Cortex-A53x2 SMM";
+       arm,hbi = <0x247>;
+       arm,vexpress,site = <0xf>;
+       compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
+       interrupt-parent = <&gic>;
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       chosen {
+               stdout-path = "serial0:38400n8";
+       };
+
+       aliases {
+               serial0 = &v2m_serial0;
+               serial1 = &v2m_serial1;
+               serial2 = &v2m_serial2;
+               serial3 = &v2m_serial3;
+               i2c0 = &v2m_i2c_dvi;
+               i2c1 = &v2m_i2c_pcie;
+       };
+
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a53", "arm,armv8";
+                       reg = <0 0>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a53", "arm,armv8";
+                       reg = <0 1>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache0 {
+                       compatible = "cache";
+               };
+       };
+
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
+       };
+
+       gic: interrupt-controller@2c001000 {
+               compatible = "arm,gic-400";
+               #interrupt-cells = <3>;
+               #address-cells = <0>;
+               interrupt-controller;
+               reg = <0 0x2c001000 0 0x1000>,
+                     <0 0x2c002000 0 0x2000>,
+                     <0 0x2c004000 0 0x2000>,
+                     <0 0x2c006000 0 0x2000>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+       };
+
+       timer {
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       pmu {
+               compatible = "arm,armv8-pmuv3";
+               interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+       };
+
+       dcc {
+               compatible = "arm,vexpress,config-bus";
+               arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+               smbclk: osc@4 {
+                       /* SMC clock */
+                       compatible = "arm,vexpress-osc";
+                       arm,vexpress-sysreg,func = <1 4>;
+                       freq-range = <40000000 40000000>;
+                       #clock-cells = <0>;
+                       clock-output-names = "smclk";
+               };
+
+               volt@0 {
+                       /* VIO to expansion board above */
+                       compatible = "arm,vexpress-volt";
+                       arm,vexpress-sysreg,func = <2 0>;
+                       regulator-name = "VIO_UP";
+                       regulator-min-microvolt = <800000>;
+                       regulator-max-microvolt = <1800000>;
+                       regulator-always-on;
+               };
+
+               volt@1 {
+                       /* 12V from power connector J6 */
+                       compatible = "arm,vexpress-volt";
+                       arm,vexpress-sysreg,func = <2 1>;
+                       regulator-name = "12";
+                       regulator-always-on;
+               };
+
+               temp@0 {
+                       /* FPGA temperature */
+                       compatible = "arm,vexpress-temp";
+                       arm,vexpress-sysreg,func = <4 0>;
+                       label = "FPGA";
+               };
+       };
+
+       smb {
+               compatible = "simple-bus";
+
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0 0 0x08000000 0x04000000>,
+                        <1 0 0 0x14000000 0x04000000>,
+                        <2 0 0 0x18000000 0x04000000>,
+                        <3 0 0 0x1c000000 0x04000000>,
+                        <4 0 0 0x0c000000 0x04000000>,
+                        <5 0 0 0x10000000 0x04000000>;
+
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 0 63>;
+               interrupt-map = <0 0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+               /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi"
+       };
+};
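
For reference, the interrupt routing in the smb node above works as follows: interrupt-map-mask = <0 0 63> means only the low six bits of a child interrupt specifier (two address cells plus one interrupt cell) take part in the lookup, and each interrupt-map entry then routes motherboard interrupt N one-to-one onto GIC SPI N as level-high, so the devices in the included vexpress-v2m-rs1.dtsi need no per-board interrupt numbers.
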
index d8c0bdc51882a18cc311431e8e17776f5847ca30..9cb7cf94284a66b07619f4b2a4ab042c803c5f03 100644 (file)
                gic0: interrupt-controller@8010,00000000 {
                        compatible = "arm,gic-v3";
                        #interrupt-cells = <3>;
+                       #address-cells = <2>;
+                       #size-cells = <2>;
+                       ranges;
                        interrupt-controller;
                        reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
                              <0x8010 0x80000000 0x0 0x600000>; /* GICR */
                        interrupts = <1 9 0xf04>;
+
+                       its: gic-its@8010,00020000 {
+                               compatible = "arm,gic-v3-its";
+                               msi-controller;
+                               reg = <0x8010 0x20000 0x0 0x200000>;
+                       };
                };
 
                uaa0: serial@87e0,24000000 {
index f38c94f1d89834ebec7c4ab4d514e27b368e6bd5..4e17e7ede33d5a3ebacd8c3a13d31916b009a36b 100644 (file)
@@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
 CONFIG_AHCI_XGENE=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
index 39248d3adf5d8b2858a9e01aa92209e70f78772b..406485ed110af9fb12ce8040815355442d088444 100644 (file)
 #include <asm/psci.h>
 #include <asm/smp_plat.h>
 
+/* Macros for consistency checks of the GICC subtable of MADT */
+#define ACPI_MADT_GICC_LENGTH  \
+       (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
+
+#define BAD_MADT_GICC_ENTRY(entry, end)                                                \
+       (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||       \
+        (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
 /* ACPI table mapping after acpi_gbl_permanent_mmap is set */
index a7691a37866834375283b56501e6b11f23f49de8..f860bfda454afb9de7007684ba60eaf7f375e130 100644 (file)
@@ -352,8 +352,8 @@ el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        enable_dbg
        mov     x0, sp
+       mov     x2, x1
        mov     x1, #BAD_SYNC
-       mrs     x2, esr_el1
        b       bad_mode
 ENDPROC(el1_sync)
 
@@ -553,7 +553,7 @@ el0_inv:
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
-       mrs     x2, esr_el1
+       mov     x2, x25
        bl      bad_mode
        b       ret_to_user
 ENDPROC(el0_sync)
index bd9bfaa9269bc59980e89209e22ac0286288f9ef..f332d5d1f6b40c1b5382301d103fe3ed588ea4c1 100644 (file)
 
 ENTRY(compat_sys_sigreturn_wrapper)
        mov     x0, sp
-       mov     x27, #0         // prevent syscall restart handling (why)
        b       compat_sys_sigreturn
 ENDPROC(compat_sys_sigreturn_wrapper)
 
 ENTRY(compat_sys_rt_sigreturn_wrapper)
        mov     x0, sp
-       mov     x27, #0         // prevent syscall restart handling (why)
        b       compat_sys_rt_sigreturn
 ENDPROC(compat_sys_rt_sigreturn_wrapper)
 
index 695801a54ca54dac465c2b421a822658e571cc88..50fb4696654ea56b2efc28741c6c45d38d796235 100644 (file)
@@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
        struct acpi_madt_generic_interrupt *processor;
 
        processor = (struct acpi_madt_generic_interrupt *)header;
-       if (BAD_MADT_ENTRY(processor, end))
+       if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;
 
        acpi_table_print_madt_entry(header);
index 9d84feb41a16601e411976aff3a2bc87a0d7dcbe..773d37a14039d9bb456f0896e022c4a3e1415773 100644 (file)
@@ -4,5 +4,3 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
                                   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP)     += dump.o
-
-CFLAGS_mmu.o                   := -I$(srctree)/scripts/dtc/libfdt/
index 4dda9bd6b8fbd17219efc7d103207719282e5a33..e989cee7741489b0eaec0069d0c5fe87bcce1e4a 100644 (file)
@@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port)
                if (port->write_ts_idx == NBR_IN_DESCR)
                        port->write_ts_idx = 0;
                idx = port->write_ts_idx++;
-               do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+               ktime_get_ts(&port->timestamp[idx]);
                port->in_buffer_len += port->inbufchunk;
        }
        spin_unlock_irqrestore(&port->lock, flags);
index 2a14585c90d254a645a3af9705c8aed12cfac099..aab7e46cadd5d843f3f7252a494fa9b0ef9af0c2 100644 (file)
@@ -2231,7 +2231,7 @@ config MIPS_CMP
 
 config MIPS_CPS
        bool "MIPS Coherent Processing System support"
-       depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
+       depends on SYS_SUPPORTS_MIPS_CPS
        select MIPS_CM
        select MIPS_CPC
        select MIPS_CPS_PM if HOTPLUG_CPU
index 37c08a27b4f00a338d46d1fdbef718eac62809f1..c9f7e231e66bbe78a86412516bee7301b5cf8027 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- *                    Insititute of Computing Technology
+ *                    Institute of Computing Technology
  * Author:  Xiang Gao, gaoxiang@ict.ac.cn
  *          Huacai Chen, chenhc@lemote.com
  *          Xiaofu Meng, Shuangshuang Zhang
index 2b25d1ba1ea037ca82212ec542a165e714ecf29c..16f1ea9ab191234ee8dc5599803b91dcc2ccf745 100644 (file)
@@ -23,6 +23,7 @@
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
index c0c5e5972256e95273d7e34a0b4c3c4132232cd0..d8f9b357b2226bef0141c8ed6340baa8fb7c0f4f 100644 (file)
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                break;
 
        case blezl_op: /* not really i_format */
-               if (NO_R6EMU)
+               if (!insn.i_format.rt && NO_R6EMU)
                        goto sigill_r6;
        case blez_op:
                /*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                break;
 
        case bgtzl_op:
-               if (NO_R6EMU)
+               if (!insn.i_format.rt && NO_R6EMU)
                        goto sigill_r6;
        case bgtz_op:
                /*
index 55b759a0019e61671d78919bc5b143ba5ff4a94d..1b6ca634e6465b6fea48eaa81d45dec259e445e2 100644 (file)
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
         nop
 
        /* This is an NMI */
-       la      k0, nmi_handler
+       PTR_LA  k0, nmi_handler
        jr      k0
         nop
 
@@ -107,10 +107,10 @@ not_nmi:
        mul     t1, t1, t0
        mul     t1, t1, t2
 
-       li      a0, KSEG0
-       add     a1, a0, t1
+       li      a0, CKSEG0
+       PTR_ADD a1, a0, t1
 1:     cache   Index_Store_Tag_I, 0(a0)
-       add     a0, a0, t0
+       PTR_ADD a0, a0, t0
        bne     a0, a1, 1b
         nop
 icache_done:
@@ -134,12 +134,12 @@ icache_done:
        mul     t1, t1, t0
        mul     t1, t1, t2
 
-       li      a0, KSEG0
-       addu    a1, a0, t1
-       subu    a1, a1, t0
+       li      a0, CKSEG0
+       PTR_ADDU a1, a0, t1
+       PTR_SUBU a1, a1, t0
 1:     cache   Index_Store_Tag_D, 0(a0)
        bne     a0, a1, 1b
-        add    a0, a0, t0
+        PTR_ADD a0, a0, t0
 dcache_done:
 
        /* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
 
        /* Enter the coherent domain */
        li      t0, 0xff
-       sw      t0, GCR_CL_COHERENCE_OFS(v1)
+       PTR_S   t0, GCR_CL_COHERENCE_OFS(v1)
        ehb
 
        /* Jump to kseg0 */
-       la      t0, 1f
+       PTR_LA  t0, 1f
        jr      t0
         nop
 
@@ -178,9 +178,9 @@ dcache_done:
         nop
 
        /* Off we go! */
-       lw      t1, VPEBOOTCFG_PC(v0)
-       lw      gp, VPEBOOTCFG_GP(v0)
-       lw      sp, VPEBOOTCFG_SP(v0)
+       PTR_L   t1, VPEBOOTCFG_PC(v0)
+       PTR_L   gp, VPEBOOTCFG_GP(v0)
+       PTR_L   sp, VPEBOOTCFG_SP(v0)
        jr      t1
         nop
        END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
 
 .org 0x480
 LEAF(excep_ejtag)
-       la      k0, ejtag_debug_handler
+       PTR_LA  k0, ejtag_debug_handler
        jr      k0
         nop
        END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
         nop
 
        .set    push
-       .set    mips32r2
+       .set    mips64r2
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
 
        /* ...and for the moment only 1 VPE */
        dvpe
-       la      t1, 1f
+       PTR_LA  t1, 1f
        jr.hb   t1
         nop
 
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
        mfc0    t0, CP0_MVPCONF0
        srl     t0, t0, MVPCONF0_PVPE_SHIFT
        andi    t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-       addiu   t7, t0, 1
+       addiu   ta3, t0, 1
 
        /* If there's only 1, we're done */
        beqz    t0, 2f
         nop
 
        /* Loop through each VPE within this core */
-       li      t5, 1
+       li      ta1, 1
 
 1:     /* Operate on the appropriate TC */
-       mtc0    t5, CP0_VPECONTROL
+       mtc0    ta1, CP0_VPECONTROL
        ehb
 
        /* Bind TC to VPE (1:1 TC:VPE mapping) */
-       mttc0   t5, CP0_TCBIND
+       mttc0   ta1, CP0_TCBIND
 
        /* Set exclusive TC, non-active, master */
        li      t0, VPECONF0_MVP
-       sll     t1, t5, VPECONF0_XTC_SHIFT
+       sll     t1, ta1, VPECONF0_XTC_SHIFT
        or      t0, t0, t1
        mttc0   t0, CP0_VPECONF0
 
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
        mttc0   t0, CP0_TCHALT
 
        /* Next VPE */
-       addiu   t5, t5, 1
-       slt     t0, t5, t7
+       addiu   ta1, ta1, 1
+       slt     t0, ta1, ta3
        bnez    t0, 1b
         nop
 
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
 
 LEAF(mips_cps_boot_vpes)
        /* Retrieve CM base address */
-       la      t0, mips_cm_base
-       lw      t0, 0(t0)
+       PTR_LA  t0, mips_cm_base
+       PTR_L   t0, 0(t0)
 
        /* Calculate a pointer to this core's struct core_boot_config */
-       lw      t0, GCR_CL_ID_OFS(t0)
+       PTR_L   t0, GCR_CL_ID_OFS(t0)
        li      t1, COREBOOTCFG_SIZE
        mul     t0, t0, t1
-       la      t1, mips_cps_core_bootcfg
-       lw      t1, 0(t1)
-       addu    t0, t0, t1
+       PTR_LA  t1, mips_cps_core_bootcfg
+       PTR_L   t1, 0(t1)
+       PTR_ADDU t0, t0, t1
 
        /* Calculate this VPE's ID. If the core doesn't support MT use 0 */
-       has_mt  t6, 1f
+       has_mt  ta2, 1f
         li     t9, 0
 
        /* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
 1:     /* Calculate a pointer to this VPE's struct vpe_boot_config */
        li      t1, VPEBOOTCFG_SIZE
        mul     v0, t9, t1
-       lw      t7, COREBOOTCFG_VPECONFIG(t0)
-       addu    v0, v0, t7
+       PTR_L   ta3, COREBOOTCFG_VPECONFIG(t0)
+       PTR_ADDU v0, v0, ta3
 
 #ifdef CONFIG_MIPS_MT
 
        /* If the core doesn't support MT then return */
-       bnez    t6, 1f
+       bnez    ta2, 1f
         nop
        jr      ra
         nop
 
        .set    push
-       .set    mips32r2
+       .set    mips64r2
        .set    mt
 
 1:     /* Enter VPE configuration state */
        dvpe
-       la      t1, 1f
+       PTR_LA  t1, 1f
        jr.hb   t1
         nop
 1:     mfc0    t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
        ehb
 
        /* Loop through each VPE */
-       lw      t6, COREBOOTCFG_VPEMASK(t0)
-       move    t8, t6
-       li      t5, 0
+       PTR_L   ta2, COREBOOTCFG_VPEMASK(t0)
+       move    t8, ta2
+       li      ta1, 0
 
        /* Check whether the VPE should be running. If not, skip it */
-1:     andi    t0, t6, 1
+1:     andi    t0, ta2, 1
        beqz    t0, 2f
         nop
 
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
        mfc0    t0, CP0_VPECONTROL
        ori     t0, t0, VPECONTROL_TARGTC
        xori    t0, t0, VPECONTROL_TARGTC
-       or      t0, t0, t5
+       or      t0, t0, ta1
        mtc0    t0, CP0_VPECONTROL
        ehb
 
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
 
        /* Calculate a pointer to the VPE's struct vpe_boot_config */
        li      t0, VPEBOOTCFG_SIZE
-       mul     t0, t0, t5
-       addu    t0, t0, t7
+       mul     t0, t0, ta1
+       addu    t0, t0, ta3
 
        /* Set the TC restart PC */
        lw      t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
        mttc0   t0, CP0_VPECONF0
 
        /* Next VPE */
-2:     srl     t6, t6, 1
-       addiu   t5, t5, 1
-       bnez    t6, 1b
+2:     srl     ta2, ta2, 1
+       addiu   ta1, ta1, 1
+       bnez    ta2, 1b
         nop
 
        /* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
        /* This VPE should be offline, halt the TC */
        li      t0, TCHALT_H
        mtc0    t0, CP0_TCHALT
-       la      t0, 1f
+       PTR_LA  t0, 1f
 1:     jr.hb   t0
         nop
 
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
        .set    noat
        lw      $1, TI_CPU(gp)
        sll     $1, $1, LONGLOG
-       la      \dest, __per_cpu_offset
+       PTR_LA  \dest, __per_cpu_offset
        addu    $1, $1, \dest
        lw      $1, 0($1)
-       la      \dest, cps_cpu_state
+       PTR_LA  \dest, cps_cpu_state
        addu    \dest, \dest, $1
        .set    pop
        .endm
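
Why these conversions matter: on 64-bit kernels pointers are 8 bytes, so the bare la/lw/sw/add forms above would truncate addresses, and the o32 register names t4-t7 do not even exist under the n64 ABI (those registers become a4-a7), which is why the code switches to the ta1/ta3 aliases. A rough sketch of how the PTR_* macros select the pointer-sized instruction (names follow arch/mips/include/asm/asm.h; the exact definitions in the tree may differ):

    #if (_MIPS_SZPTR == 32)
    # define PTR_L      lw      /* pointer-sized load   */
    # define PTR_S      sw      /* pointer-sized store  */
    # define PTR_LA     la      /* load address         */
    # define PTR_ADD    add
    # define PTR_ADDU   addu
    # define PTR_SUBU   subu
    #else /* _MIPS_SZPTR == 64 */
    # define PTR_L      ld
    # define PTR_S      sd
    # define PTR_LA     dla
    # define PTR_ADD    dadd
    # define PTR_ADDU   daddu
    # define PTR_SUBU   dsubu
    #endif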
index 6e8de80bb4468c82378d3c4af8ef3d08cefd5cf1..4cc13508d967c4076e0b720b120487281003ed72 100644 (file)
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
        .set    noreorder
        .set    nomacro
 
-1:     user_lw(t5, 16(t0))             # argument #5 from usp
-4:     user_lw(t6, 20(t0))             # argument #6 from usp
-3:     user_lw(t7, 24(t0))             # argument #7 from usp
-2:     user_lw(t8, 28(t0))             # argument #8 from usp
+load_a4: user_lw(t5, 16(t0))           # argument #5 from usp
+load_a5: user_lw(t6, 20(t0))           # argument #6 from usp
+load_a6: user_lw(t7, 24(t0))           # argument #7 from usp
+load_a7: user_lw(t8, 28(t0))           # argument #8 from usp
+loads_done:
 
        sw      t5, 16(sp)              # argument #5 to ksp
        sw      t6, 20(sp)              # argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
        .set    pop
 
        .section __ex_table,"a"
-       PTR     1b,bad_stack
-       PTR     2b,bad_stack
-       PTR     3b,bad_stack
-       PTR     4b,bad_stack
+       PTR     load_a4, bad_stack_a4
+       PTR     load_a5, bad_stack_a5
+       PTR     load_a6, bad_stack_a6
+       PTR     load_a7, bad_stack_a7
        .previous
 
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
 /* ------------------------------------------------------------------------ */
 
        /*
-        * The stackpointer for a call with more than 4 arguments is bad.
-        * We probably should handle this case a bit more drastic.
+        * Our open-coded access area sanity test for the stack pointer
+        * failed. We probably should handle this case a bit more drastically.
         */
 bad_stack:
        li      v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
        sw      t0, PT_R7(sp)
        j       o32_syscall_exit
 
+bad_stack_a4:
+       li      t5, 0
+       b       load_a5
+
+bad_stack_a5:
+       li      t6, 0
+       b       load_a6
+
+bad_stack_a6:
+       li      t7, 0
+       b       load_a7
+
+bad_stack_a7:
+       li      t8, 0
+       b       loads_done
+
        /*
         * The system call does not exist in this kernel
         */
index d07b210fbeff3667f49737dbec9d9d30d3f119ed..f543ff4feef99f8c4ce02554f4dd54da1752b6de 100644 (file)
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
        daddu   t1, t0, 32
        bltz    t1, bad_stack
 
-1:     lw      a4, 16(t0)              # argument #5 from usp
-2:     lw      a5, 20(t0)              # argument #6 from usp
-3:     lw      a6, 24(t0)              # argument #7 from usp
-4:     lw      a7, 28(t0)              # argument #8 from usp (for indirect syscalls)
+load_a4: lw    a4, 16(t0)              # argument #5 from usp
+load_a5: lw    a5, 20(t0)              # argument #6 from usp
+load_a6: lw    a6, 24(t0)              # argument #7 from usp
+load_a7: lw    a7, 28(t0)              # argument #8 from usp
+loads_done:
 
        .section __ex_table,"a"
-       PTR     1b, bad_stack
-       PTR     2b, bad_stack
-       PTR     3b, bad_stack
-       PTR     4b, bad_stack
+       PTR     load_a4, bad_stack_a4
+       PTR     load_a5, bad_stack_a5
+       PTR     load_a6, bad_stack_a6
+       PTR     load_a7, bad_stack_a7
        .previous
 
        li      t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
        sd      t0, PT_R7(sp)
        j       o32_syscall_exit
 
+bad_stack_a4:
+       li      a4, 0
+       b       load_a5
+
+bad_stack_a5:
+       li      a5, 0
+       b       load_a6
+
+bad_stack_a6:
+       li      a6, 0
+       b       load_a7
+
+bad_stack_a7:
+       li      a7, 0
+       b       loads_done
+
 not_o32_scall:
        /*
         * This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
        PTR     sys_connect                     /* 4170 */
        PTR     sys_getpeername
        PTR     sys_getsockname
-       PTR     sys_getsockopt
+       PTR     compat_sys_getsockopt
        PTR     sys_listen
        PTR     compat_sys_recv                 /* 4175 */
        PTR     compat_sys_recvfrom
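
The sys32_call_table fix matters because an o32 task running on a 64-bit kernel passes 32-bit user pointers and 32-bit structure layouts; the native 64-bit sys_getsockopt would misinterpret them, so the call must go through the compat wrapper, which converts first. The heart of that conversion is pointer widening, roughly (an illustrative sketch, not kernel code):

    #include <stdint.h>

    /* zero-extend a 32-bit user pointer into a usable 64-bit one,
     * in the spirit of the kernel's compat_ptr() */
    static inline void *widen_user_ptr(uint32_t uptr)
    {
            return (void *)(uintptr_t)uptr;
    }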
index be73c491182bcab53f74aa765498d4a026a5b71b..008b3378653a004b5cd2890445b6d23c250ca1f5 100644 (file)
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
                        min_low_pfn = start;
                if (end <= reserved_end)
                        continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+               /* mapstart should be after initrd_end */
+               if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+                       continue;
+#endif
                if (start >= mapstart)
                        continue;
                mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-       /*
-        * mapstart should be after initrd_end
-        */
-       if (initrd_end)
-               mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
        /*
         * Initialize the boot-time allocator with low memory only.
         */
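
Moving the initrd test into the region loop means a candidate region that ends inside the initrd is rejected before it can lower mapstart, rather than clamping mapstart afterwards. A made-up layout shows the hazard (all values are PFNs, chosen purely for illustration):

    /* usable region A: [0x0100, 0x1800)
     * initrd:          [0x1000, 0x2000)  ->  PFN_UP(initrd_end) == 0x2000
     *
     * Region A ends at 0x1800 <= 0x2000, so it is skipped: letting it
     * set mapstart could place the bootmem bitmap on pages the initrd
     * still occupies. Only regions ending beyond the initrd qualify.
     */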
index 4251d390b5b66e0c20a7271659a280e448ca0ffe..c88937745b4ea18c7faf699f936f00cd42f9372d 100644 (file)
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
        /*
         * Patch the start of mips_cps_core_entry to provide:
         *
-        * v0 = CM base address
+        * v1 = CM base address
         * s0 = kseg0 CCA
         */
        entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
 
 static void wait_for_sibling_halt(void *ptr_cpu)
 {
-       unsigned cpu = (unsigned)ptr_cpu;
+       unsigned cpu = (unsigned long)ptr_cpu;
        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        unsigned halted;
        unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
                 */
                err = smp_call_function_single(cpu_death_sibling,
                                               wait_for_sibling_halt,
-                                              (void *)cpu, 1);
+                                              (void *)(unsigned long)cpu, 1);
                if (err)
                        panic("Failed to call remote sibling CPU\n");
        }
index faa46ebd9ddae2fc43f20d6ff65f28688f665c4f..d0744cc77ea7f7a02d94c96faf190787f7e88f64 100644 (file)
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
        }
 }
 
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+       int i, k, core_present;
+       cpumask_t temp_foreign_map;
+
+       /* The mask lives on the stack: clear it before rebuilding */
+       cpumask_clear(&temp_foreign_map);
+
+       /* Re-calculate the mask */
+       for_each_online_cpu(i) {
+               core_present = 0;
+               for_each_cpu(k, &temp_foreign_map)
+                       if (cpu_data[i].package == cpu_data[k].package &&
+                           cpu_data[i].core == cpu_data[k].core)
+                               core_present = 1;
+               if (!core_present)
+                       cpumask_set_cpu(i, &temp_foreign_map);
+       }
+
+       cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
 struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);
 
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);
 
+       calculate_cpu_foreign_map();
+
        cpumask_set_cpu(cpu, &cpu_callin_map);
 
        synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
 static void stop_this_cpu(void *dummy)
 {
        /*
-        * Remove this CPU:
+        * Remove this CPU. Be deliberately conservative here
+        * and set the bits for every online CPU so we don't
+        * miss any IPIs whilst taking this VPE down.
         */
+
+       cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+       /* Make it visible to every other CPU */
+       smp_mb();
+
        set_cpu_online(smp_processor_id(), false);
+       calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
 }
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
+       calculate_cpu_foreign_map();
 #ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
 #endif
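
A concrete (hypothetical) topology makes the effect of calculate_cpu_foreign_map() visible:

    /* 2 cores with 2 VPEs each; online CPUs = {0, 1, 2, 3}
     *   cpu0: package 0, core 0    cpu1: package 0, core 0
     *   cpu2: package 0, core 1    cpu3: package 0, core 1
     *
     * The loop keeps the first CPU seen for each (package, core)
     * pair, so cpu_foreign_map = {0, 2}: callers need to IPI only
     * one VPE per core rather than every VPE.
     */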
index 2a7b38ed23f06e817bba6187b8890b98489e168d..e207a43b5f8f0bcbf0544e5289cfc08126cbc7f5 100644 (file)
@@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
-               /* Boot CPU's cache setup in setup_arch(). */
-               if (!is_boot_cpu)
-                       cpu_cache_init();
-               tlb_init();
+       /* Boot CPU's cache setup in setup_arch(). */
+       if (!is_boot_cpu)
+               cpu_cache_init();
+       tlb_init();
        TLBMISS_HANDLER_SETUP();
 }
 
index cc0e4fd548e64f46642c56eb38cea7b8d6480e7e..4e116d23bab38f0130ef566e561cdde26c30a979 100644 (file)
@@ -3,7 +3,7 @@
  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
  * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  *  This program is free software; you can redistribute         it and/or modify it
index 72fed003a536d65e2676204753a6fa764f8e8b29..01fbed1370289a06d4440aacfad61c5e6ab77776 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2003 ICT CAS
  * Author: Michael Guo <guoyi@ict.ac.cn>
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * Copyright (C) 2009 Lemote Inc.
index 12c75db23420c70fd5ff7837a78554f15cecd606..875037063a80ecc634d74759ce3032b78261ea19 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * CS5536 General timer functions
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * Copyright (C) 2009 Lemote Inc.
index 22f04ca2ff3e58cd9ddddfa1ded6d84e5147efa0..f6c44dd332e2a388d28e5882a0c47e84ed3ca3f9 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2003 ICT CAS
  * Author: Michael Guo <guoyi@ict.ac.cn>
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * Copyright (C) 2009 Lemote Inc.
index 687003b19b4546b77e0752031c4fcf918c037454..d36d969a4a878b66a50eda926158b047fdf53ef6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  *  This program is free software; you can redistribute         it and/or modify it
index d477dd6bb3269cc9246def13db444f0628f1ce4c..2dc5122f0e09a454f3ade53a87eaf32f806b12ec 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  *  This program is free software; you can redistribute         it and/or modify it
index ef5ec8f3de5f2080588a41ec7ad8a495e4fa070c..892963f860b7a21e4b2f4cfa521a6a514d52f347 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  *  This program is free software; you can redistribute         it and/or modify it
index 462e34d46b4a9fa2265532474ecda20d249e3f79..a78fb657068cb39fc785e241f9b70aeaa412a09b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
 #include <linux/spinlock.h>
 
 #include <asm/clock.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 
 static LIST_HEAD(clock_list);
 static DEFINE_SPINLOCK(clock_lock);
index 12d14ed4877868abce36029ddad6df1b94339dba..6f9e010cec4d50f2cae8177c41d9ae95e0189b22 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- *                    Insititute of Computing Technology
+ *                    Institute of Computing Technology
  * Author:  Xiang Gao, gaoxiang@ict.ac.cn
  *          Huacai Chen, chenhc@lemote.com
  *          Xiaofu Meng, Shuangshuang Zhang
index 22b9b2cb9219fa4e4eb7b9cc340125a4c9aa4053..712f17a2ecf206e1b5d33a63956e62baac29cb08 100644 (file)
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        /* Fall through */
                case jr_op:
                        /* For R6, JR already emulated in jalr_op */
-                       if (NO_R6EMU && insn.r_format.opcode == jr_op)
+                       if (NO_R6EMU && insn.r_format.func == jr_op)
                                break;
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
@@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
        case blezl_op:
-               if (NO_R6EMU)
+               if (!insn.i_format.rt && NO_R6EMU)
                        break;
        case blez_op:
 
@@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
        case bgtzl_op:
-               if (NO_R6EMU)
+               if (!insn.i_format.rt && NO_R6EMU)
                        break;
        case bgtz_op:
                /*
index 7f660dc67596e625cd65dc9f0980735d321a05dc..fbea4432f3f23fe3d44d278f48d28fd78797d580 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
 #include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
        preempt_disable();
 
-#ifndef CONFIG_MIPS_MT_SMP
-       smp_call_function(func, info, 1);
-#endif
+       /*
+        * The Coherent Manager propagates address-based cache ops to other
+        * cores but not index-based ops. However, r4k_on_each_cpu is used
+        * in both cases, so there is no easy way to tell which kind of op
+        * is being executed on the other cores. The best we can probably do
+        * is to restrict that call to when a CM is not present, because both
+        * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
+        */
+       if (!mips_cm_present())
+               smp_call_function_many(&cpu_foreign_map, func, info, 1);
        func(info);
        preempt_enable();
 }
@@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void)
 }
 
 static char *way_string[] = { NULL, "direct mapped", "2-way",
-       "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+       "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+       "9-way", "10-way", "11-way", "12-way",
+       "13-way", "14-way", "15-way", "16-way",
 };
 
 static void probe_pcache(void)
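
The r4k_on_each_cpu() change leans on the split between the two families of MIPS cache ops; roughly (a sketch of the architectural behaviour, not literal kernel code):

    /* Address-based ("hit") ops name a virtual address and are
     * globalized to other cores when a Coherence Manager is present:
     *     cache   Hit_Writeback_Inv_D, 0(a0)
     *
     * Index-based ops select a (way, index) in the local cache only
     * and are never propagated:
     *     cache   Index_Store_Tag_I, 0(a0)
     *
     * So with a CM present no IPI is needed (the CM-based SMP
     * protocols avoid index-based ops anyway); without one, the op
     * is pushed to one VPE per core via cpu_foreign_map.
     */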
index 185e68261f45937eae2b17dcd502420ce0121f69..5625b190edc061afbf2a8885e976b48014270325 100644 (file)
@@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts)
 
 int get_c0_fdc_int(void)
 {
-       int mips_cpu_fdc_irq;
+       /*
+        * Some cores claim the FDC is routable through the GIC, but it doesn't
+        * actually seem to be connected for those Malta bitstreams.
+        */
+       switch (current_cpu_type()) {
+       case CPU_INTERAPTIV:
+       case CPU_PROAPTIV:
+               return -1;
+       }
 
        if (cpu_has_veic)
-               mips_cpu_fdc_irq = -1;
+               return -1;
        else if (gic_present)
-               mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+               return gic_get_c0_fdc_int();
        else if (cp0_fdc_irq >= 0)
-               mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+               return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
        else
-               mips_cpu_fdc_irq = -1;
-
-       return mips_cpu_fdc_irq;
+               return -1;
 }
 
 int get_c0_perfcount_int(void)
index d2dc836523a3dc917b4686dc662d07a9cc576b73..8bd8ebb20a72631501e5365478d10aa0d07b992b 100644 (file)
@@ -63,13 +63,19 @@ void __init plat_mem_setup(void)
        plat_setup_iocoherency();
 }
 
-#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CPC_BASE_ADDR  0x1bde0000
+#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
 
 phys_addr_t mips_cpc_default_phys_base(void)
 {
        return DEFAULT_CPC_BASE_ADDR;
 }
 
+phys_addr_t mips_cdmm_phys_base(void)
+{
+       return DEFAULT_CDMM_BASE_ADDR;
+}
+
 static void __init mips_nmi_setup(void)
 {
        void *base;
index 67889fcea8aa8f27c9e3e4bf7249b6a0ec497ff9..7c73fcb92a108799866c8d603019129d33c57ee6 100644 (file)
@@ -27,6 +27,11 @@ int get_c0_perfcount_int(void)
        return gic_get_c0_perfcount_int();
 }
 
+int get_c0_fdc_int(void)
+{
+       return gic_get_c0_fdc_int();
+}
+
 void __init plat_time_init(void)
 {
        struct device_node *np;
index 0a183756d6ec2badccc840b881214077edaf556b..f93c4a4e6580300f347a4d69beccc5fe4810452b 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-extern spinlock_t pa_dbit_lock;
+extern spinlock_t pa_tlb_lock;
 
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
  */
 #define kern_addr_valid(addr)  (1)
 
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+
+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+       mtsp(mm->context, 1);
+       pdtlb(addr);
+       if (unlikely(split_tlb))
+               pitlb(addr);
+}
+
 /* Certain architectures need to do special things when PTEs
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
                 *(pteptr) = (pteval);                           \
         } while(0)
 
-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+#define pte_inserted(x)                                                \
+       ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED))          \
+        == (_PAGE_PRESENT|_PAGE_ACCESSED))
 
-#define set_pte_at(mm, addr, ptep, pteval)                      \
-       do {                                                    \
+#define set_pte_at(mm, addr, ptep, pteval)                     \
+       do {                                                    \
+               pte_t old_pte;                                  \
                unsigned long flags;                            \
-               spin_lock_irqsave(&pa_dbit_lock, flags);        \
-               set_pte(ptep, pteval);                          \
-               purge_tlb_entries(mm, addr);                    \
-               spin_unlock_irqrestore(&pa_dbit_lock, flags);   \
+               spin_lock_irqsave(&pa_tlb_lock, flags);         \
+               old_pte = *ptep;                                \
+               set_pte(ptep, pteval);                          \
+               if (pte_inserted(old_pte))                      \
+                       purge_tlb_entries(mm, addr);            \
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);    \
        } while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
 
 #define pte_none(x)     (pte_val(x) == 0)
 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp)  do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
 
 #define pmd_flag(x)    (pmd_val(x) & PxD_FLAG_MASK)
 #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
        if (!pte_young(*ptep))
                return 0;
 
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        pte = *ptep;
        if (!pte_young(pte)) {
-               spin_unlock_irqrestore(&pa_dbit_lock, flags);
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);
                return 0;
        }
        set_pte(ptep, pte_mkold(pte));
        purge_tlb_entries(vma->vm_mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
        return 1;
 }
 
@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        pte_t old_pte;
        unsigned long flags;
 
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        old_pte = *ptep;
-       pte_clear(mm,addr,ptep);
-       purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       set_pte(ptep, __pte(0));
+       if (pte_inserted(old_pte))
+               purge_tlb_entries(mm, addr);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
        return old_pte;
 }
@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        unsigned long flags;
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        set_pte(ptep, pte_wrprotect(*ptep));
        purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 
 #define pte_same(A,B)  (pte_val(A) == pte_val(B))
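
pte_inserted() encodes the invariant that PTEs only enter the parisc TLB through the software miss handlers, which set _PAGE_ACCESSED under pa_tlb_lock before inserting. That lets the slow broadcast purge be skipped whenever the old PTE can never have been loaded; as a truth table (illustrative):

    /* old_pte value                     purge on clear/update?
     * 0 (pte_none)                      no  - never inserted
     * _PAGE_PRESENT only                no  - never accessed, so the
     *                                         miss handler never ran
     * _PAGE_PRESENT | _PAGE_ACCESSED    yes - may be live in a TLB
     */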
index 9d086a599fa05f56a13cbd6b214b466ad4b43a12..e84b96478193ca959c27ec9493d298f845ac6030 100644 (file)
@@ -13,6 +13,9 @@
  * active at any one time on the Merced bus.  This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
  * it on all systems not just the N class.
+ *
+ * It is also used to ensure PTE updates are atomic and consistent
+ * with the TLB.
  */
 extern spinlock_t pa_tlb_lock;
 
@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
 
 #define smp_flush_tlb_all()    flush_tlb_all()
 
+int __flush_tlb_range(unsigned long sid,
+       unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma, start, end) \
+       __flush_tlb_range((vma)->vm_mm->context, start, end)
+
+#define flush_tlb_kernel_range(start, end) \
+       __flush_tlb_range(0, start, end)
+
 /*
  * flush_tlb_mm()
  *
- * XXX This code is NOT valid for HP-UX compatibility processes,
- * (although it will probably work 99% of the time). HP-UX
- * processes are free to play with the space id's and save them
- * over long periods of time, etc. so we have to preserve the
- * space and just flush the entire tlb. We need to check the
- * personality in order to do that, but the personality is not
- * currently being set correctly.
- *
- * Of course, Linux processes could do the same thing, but
- * we don't support that (and the compilers, dynamic linker,
- * etc. do not do that).
+ * The code to switch to a new context is NOT valid for processes
+ * which play with the space id's.  Thus, we have to preserve the
+ * space and just flush the entire tlb.  However, the compilers,
+ * dynamic linker, etc, do not manipulate space id's, so there
+ * could be a significant performance benefit in switching contexts
+ * and not flushing the whole tlb.
  */
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
        BUG_ON(mm == &init_mm); /* Should never happen */
 
 #if 1 || defined(CONFIG_SMP)
+       /* Except for very small threads, flushing the whole TLB is
+        * faster than using __flush_tlb_range.  The pdtlb and pitlb
+        * instructions are very slow because of the TLB broadcast.
+        * It might be faster to do local range flushes on all CPUs
+        * on PA 2.0 systems.
+        */
        flush_tlb_all();
 #else
        /* FIXME: currently broken, causing space id and protection ids
-        *  to go out of sync, resulting in faults on userspace accesses.
+        * to go out of sync, resulting in faults on userspace accesses.
+        * This approach needs further investigation since running many
+        * small applications (e.g., GCC testsuite) is faster on HP-UX.
         */
        if (mm) {
                if (mm->context != 0)
@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 {
        unsigned long flags, sid;
 
-       /* For one page, it's not worth testing the split_tlb variable */
-
-       mb();
        sid = vma->vm_mm->context;
        purge_tlb_start(flags);
        mtsp(sid, 1);
        pdtlb(addr);
-       pitlb(addr);
+       if (unlikely(split_tlb))
+               pitlb(addr);
        purge_tlb_end(flags);
 }
-
-void __flush_tlb_range(unsigned long sid,
-       unsigned long start, unsigned long end);
-
-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
-
-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
-
 #endif
index f6448c7c62b51f3785c758b2073b0eb967362633..cda6dbbe98426c4bd77a7c0ff31f0525b06fc2ee 100644 (file)
@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
 
 void __init parisc_setup_cache_timing(void)
 {
        unsigned long rangetime, alltime;
-       unsigned long size;
+       unsigned long size, start;
 
        alltime = mfctl(16);
        flush_data_cache();
@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;
 
-       parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 
+       parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;
 
        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;
 
-       printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+       printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+               parisc_cache_flush_threshold/1024);
+
+       /* calculate TLB flush threshold */
+
+       alltime = mfctl(16);
+       flush_tlb_all();
+       alltime = mfctl(16) - alltime;
+
+       size = PAGE_SIZE;
+       start = (unsigned long) _text;
+       rangetime = mfctl(16);
+       while (start < (unsigned long) _end) {
+               flush_tlb_kernel_range(start, start + PAGE_SIZE);
+               start += PAGE_SIZE;
+               size += PAGE_SIZE;
+       }
+       rangetime = mfctl(16) - rangetime;
+
+       printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+               alltime, size, rangetime);
+
+       parisc_tlb_flush_threshold = size * alltime / rangetime;
+       parisc_tlb_flush_threshold *= num_online_cpus();
+       parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
+       if (!parisc_tlb_flush_threshold)
+               parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
+       printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+               parisc_tlb_flush_threshold/1024);
 }
 
 extern void purge_kernel_dcache_page_asm(unsigned long);
@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 }
 EXPORT_SYMBOL(copy_user_page);
 
-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
-{
-       unsigned long flags;
-
-       /* Note: purge_tlb_entries can be called at startup with
-          no context.  */
-
-       purge_tlb_start(flags);
-       mtsp(mm->context, 1);
-       pdtlb(addr);
-       pitlb(addr);
-       purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(purge_tlb_entries);
-
-void __flush_tlb_range(unsigned long sid, unsigned long start,
-                      unsigned long end)
+/* __flush_tlb_range()
+ *
+ * returns 1 if all TLBs were flushed.
+ */
+int __flush_tlb_range(unsigned long sid, unsigned long start,
+                     unsigned long end)
 {
-       unsigned long npages;
+       unsigned long flags, size;
 
-       npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-       if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
+       size = (end - start);
+       if (size >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
-       else {
-               unsigned long flags;
+               return 1;
+       }
 
+       /* Purge TLB entries for small ranges using the pdtlb and
+          pitlb instructions.  These instructions execute locally
+          but cause a purge request to be broadcast to other TLBs.  */
+       if (likely(!split_tlb)) {
+               while (start < end) {
+                       purge_tlb_start(flags);
+                       mtsp(sid, 1);
+                       pdtlb(start);
+                       purge_tlb_end(flags);
+                       start += PAGE_SIZE;
+               }
+               return 0;
+       }
+
+       /* split TLB case */
+       while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, 1);
-               if (split_tlb) {
-                       while (npages--) {
-                               pdtlb(start);
-                               pitlb(start);
-                               start += PAGE_SIZE;
-                       }
-               } else {
-                       while (npages--) {
-                               pdtlb(start);
-                               start += PAGE_SIZE;
-                       }
-               }
+               pdtlb(start);
+               pitlb(start);
                purge_tlb_end(flags);
+               start += PAGE_SIZE;
        }
+       return 0;
 }
 
 static void cacheflush_h_tmp_function(void *dummy)
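
Both thresholds come from the same measurement idea: time one full flush, time a page-by-page purge over a known range, and solve for the break-even size. With made-up numbers for illustration:

    /* If flush_tlb_all() costs alltime = 10,000 cycles and purging a
     * size = 1 MiB range page-by-page costs rangetime = 40,000 cycles:
     *
     *   parisc_tlb_flush_threshold = size * alltime / rangetime
     *                              = 1 MiB * 10000 / 40000 = 256 KiB
     *
     * scaled by num_online_cpus() (broadcast purges get slower with
     * more CPUs) and page-aligned. __flush_tlb_range() then flushes
     * everything at or above the threshold and purges per page below.
     */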
index 75819617f93b9c8fe3e7cd1c655354c83e338360..c5ef4081b01d2f0aee841a7f8f06b0be416a6fd8 100644 (file)
@@ -45,7 +45,7 @@
        .level 2.0
 #endif
 
-       .import         pa_dbit_lock,data
+       .import         pa_tlb_lock,data
 
        /* space_to_prot macro creates a prot id from a space id */
 
        SHLREG          %r9,PxD_VALUE_SHIFT,\pmd
        extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
-       shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
-       LDREG           %r0(\pmd),\pte          /* pmd is now pte */
+       shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
+       LDREG           %r0(\pmd),\pte
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
        .endm
 
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
 
-       /* Acquire pa_dbit_lock lock. */
-       .macro          dbit_lock       spc,tmp,tmp1
+       /* Acquire pa_tlb_lock lock and recheck page is still present. */
+       .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
        cmpib,COND(=),n 0,\spc,2f
-       load32          PA(pa_dbit_lock),\tmp
+       load32          PA(pa_tlb_lock),\tmp
 1:     LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
+       LDREG           0(\ptp),\pte
+       bb,<,n          \pte,_PAGE_PRESENT_BIT,2f
+       b               \fault
+       stw              \spc,0(\tmp)
 2:
 #endif
        .endm
 
-       /* Release pa_dbit_lock lock without reloading lock address. */
-       .macro          dbit_unlock0    spc,tmp
+       /* Release pa_tlb_lock lock without reloading lock address. */
+       .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
 #endif
        .endm
 
-       /* Release pa_dbit_lock lock. */
-       .macro          dbit_unlock1    spc,tmp
+       /* Release pa_tlb_lock lock. */
+       .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
-       load32          PA(pa_dbit_lock),\tmp
-       dbit_unlock0    \spc,\tmp
+       load32          PA(pa_tlb_lock),\tmp
+       tlb_unlock0     \spc,\tmp
 #endif
        .endm
 
        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
-       .macro          update_ptep     spc,ptep,pte,tmp,tmp1
-#ifdef CONFIG_SMP
-       or,COND(=)      %r0,\spc,%r0
-       LDREG           0(\ptep),\pte
-#endif
+       .macro          update_accessed ptp,pte,tmp,tmp1
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
-       STREG           \tmp,0(\ptep)
+       STREG           \tmp,0(\ptp)
        .endm
 
        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
-       .macro          update_dirty    spc,ptep,pte,tmp
-#ifdef CONFIG_SMP
-       or,COND(=)      %r0,\spc,%r0
-       LDREG           0(\ptep),\pte
-#endif
+       .macro          update_dirty    ptp,pte,tmp
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
-       STREG           \pte,0(\ptep)
+       STREG           \pte,0(\ptp)
        .endm
 
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1202,20 +1198,20 @@ dtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1269,16 +1264,16 @@ dtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
 
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
        
-        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
+       idtlbt          pte,prot
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1406,14 +1401,14 @@ itlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1458,20 +1453,20 @@ itlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1482,20 +1477,20 @@ naitlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1516,16 +1511,16 @@ itlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0  
+       f_extend        pte,t1
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1536,16 +1531,16 @@ naitlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1568,14 +1563,14 @@ dbit_trap_20w:
 
        L3_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
                
        idtlbt          pte,prot
-       dbit_unlock0    spc,t0
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 #else
@@ -1588,8 +1583,8 @@ dbit_trap_11:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1600,8 +1595,8 @@ dbit_trap_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t1, %sr1     /* Restore sr1 */
-       dbit_unlock0    spc,t0
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 
@@ -1612,16 +1607,16 @@ dbit_trap_20:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t1
        
-        idtlbt          pte,prot
-       dbit_unlock0    spc,t0
+       idtlbt          pte,prot
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 #endif
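
What the new tlb_lock closes is a read-then-insert race: the old dbit_lock path could load a PTE, lose the lock to a CPU clearing and purging that PTE, and then insert the stale translation. Rechecking _PAGE_PRESENT under pa_tlb_lock makes the load/insert pair atomic with respect to set_pte_at() and ptep_get_and_clear(); in pseudo-C (a sketch of the assembly above, not literal kernel code):

    spin_lock(&pa_tlb_lock);
    pte = *ptp;                      /* reload under the lock       */
    if (!(pte & _PAGE_PRESENT)) {    /* raced with a clearing path? */
            spin_unlock(&pa_tlb_lock);
            goto fault;              /* take the fault path instead */
    }
    pte |= _PAGE_ACCESSED;           /* update_accessed             */
    *ptp = pte;
    insert_tlb(pte);                 /* idtlbt / iitlbt             */
    spin_unlock(&pa_tlb_lock);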
index 6548fd1d2e62defc2dfc6e8dfc125a42aa8e986e..b99b39f1da02431865b94e6bfe720e9858163a0d 100644 (file)
 
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-DEFINE_SPINLOCK(pa_dbit_lock);
-#endif
-
 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);
 
index ccde8f084ce426ab382964ec5e2eba389317be44..112ccf4975620f899a4747c98a13cd09260b5598 100644 (file)
 
        .text
 
+/*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+
+core_idle_lock_held:
+       HMT_LOW
+3:     lwz     r15,0(r14)
+       andi.   r15,r15,PNV_CORE_IDLE_LOCK_BIT
+       bne     3b
+       HMT_MEDIUM
+       lwarx   r15,0,r14
+       blr
+
 /*
  * Pass requested state in r3:
  *     r3 - PNV_THREAD_NAP/SLEEP/WINKLE
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
        ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
 lwarx_loop1:
        lwarx   r15,0,r14
+
+       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+       bnel    core_idle_lock_held
+
        andc    r15,r15,r7                      /* Clear thread bit */
 
        andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
         * workaround undo code or resyncing timebase or restoring context
         * In either case loop until the lock bit is cleared.
         */
-       bne     core_idle_lock_held
+       bnel    core_idle_lock_held
 
        cmpwi   cr2,r15,0
        lbz     r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
        isync
        b       common_exit
 
-core_idle_lock_held:
-       HMT_LOW
-core_idle_lock_loop:
-       lwz     r15,0(14)
-       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
-       bne     core_idle_lock_loop
-       HMT_MEDIUM
-       b       lwarx_loop2
-
 first_thread_in_subcore:
        /* First thread in subcore to wakeup */
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
index 6530f1b8874dac16dc7f71480b5d748d0ccc268d..37de90f8a845c9ef8f1410100a0d7d3b05b93195 100644 (file)
@@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs)
 
        __this_cpu_inc(irq_stat.mce_exceptions);
 
+       add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
        return handled;
index 6d535973b200dde0f64ae19a3d47f1eaca6a4c99..a67c6d781c52b9a28ac90b0c6cc1650519e9260c 100644 (file)
@@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
+       case 0x600:
+               printk(KERN_ALERT "Unable to handle kernel paging request for "
+                       "unaligned access at address 0x%08lx\n", regs->dar);
+               break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
index ec2eb20631d14f12bb6ffa310370a5eb230a9408..df956295c2a7ab3ede23e2a06e650d39943a1b17 100644 (file)
@@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
        if (!attr)
                return NULL;
 
+       sysfs_attr_init(&attr->attr.attr);
+
        attr->var = str;
        attr->attr.attr.name = name;
        attr->attr.attr.mode = 0444;
index 4949ef0d94004e0f315f90e37cc2a50ce3582584..37f959bf392e72a8a4bc7dee92181846b1383458 100644 (file)
@@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
        return elog;
 }
 
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
 {
        __be64 size;
        __be64 id;
@@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
                pr_err("ELOG: OPAL log info read failed\n");
-               return;
+               return IRQ_HANDLED;
        }
 
        elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work)
         * entries.
         */
        if (kset_find_obj(elog_kset, name))
-               return;
+               return IRQ_HANDLED;
 
        create_elog_obj(log_id, elog_size, elog_type);
-}
-
-static DECLARE_WORK(elog_work, elog_work_fn);
 
-static irqreturn_t elog_event(int irq, void *data)
-{
-       schedule_work(&elog_work);
        return IRQ_HANDLED;
 }
 
@@ -304,8 +298,8 @@ int __init opal_elog_init(void)
                return irq;
        }
 
-       rc = request_irq(irq, elog_event,
-                       IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL);
+       rc = request_threaded_irq(irq, NULL, elog_event,
+                       IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
        if (rc) {
                pr_err("%s: Can't request OPAL event irq (%d)\n",
                       __func__, rc);
index 46cb3feb0a13a280a3949f5378e771928c828b63..4ece8e40dd5414868ef9934d7a809578c37f2743 100644 (file)
@@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file)
 static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
 {
        size_t addr, size;
+       pgprot_t page_prot;
        int rc;
 
        pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
        if (!opal_prd_range_is_valid(addr, size))
                return -EINVAL;
 
-       vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file,
-                                               vma->vm_pgoff,
-                                                size, vma->vm_page_prot))
-                                       | _PAGE_SPECIAL);
+       page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+                                        size, vma->vm_page_prot);
 
        rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
-                       vma->vm_page_prot);
+                               page_prot);
 
        return rc;
 }
index 2bc33674ebfc11d2525700191552389fafc1a23d..87f9623ca805d6ba601919784df3c7e6d38e488f 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/semaphore.h>
 #include <asm/msi_bitmap.h>
+#include <asm/ppc-pci.h>
 
 struct ppc4xx_hsta_msi {
        struct device *dev;
index 88c7016492c4dcbcddbc11e5851e86d9fe2608aa..97bbb6060b2526127565541b57b170a5a70bbc10 100644 (file)
@@ -28,7 +28,7 @@
 #define _ST(p, inst, v)                                                \
        ({                                                      \
                asm("1: " #inst " %0, %1;"                      \
-                   ".pushsection .coldtext.memcpy,\"ax\";"     \
+                   ".pushsection .coldtext,\"ax\";"    \
                    "2: { move r0, %2; jrp lr };"               \
                    ".section __ex_table,\"a\";"                \
                    ".align 8;"                                 \
@@ -41,7 +41,7 @@
        ({                                                      \
                unsigned long __v;                              \
                asm("1: " #inst " %0, %1;"                      \
-                   ".pushsection .coldtext.memcpy,\"ax\";"     \
+                   ".pushsection .coldtext,\"ax\";"    \
                    "2: { move r0, %2; jrp lr };"               \
                    ".section __ex_table,\"a\";"                \
                    ".align 8;"                                 \
index 55bced17dc95a475194f624d0e259fcaee3f8c8a..3dbb7e7909ca50bc45d44e7a0fbc18d79e3cdb70 100644 (file)
@@ -254,6 +254,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
        def_bool y
 
+config KASAN_SHADOW_OFFSET
+       hex
+       depends on KASAN
+       default 0xdffffc0000000000
+
 config HAVE_INTEL_TXT
        def_bool y
        depends on INTEL_IOMMU && ACPI
@@ -2015,7 +2020,7 @@ config CMDLINE_BOOL
 
          To compile command line arguments into the kernel,
          set this option to 'Y', then fill in the
-         the boot arguments in CONFIG_CMDLINE.
+         boot arguments in CONFIG_CMDLINE.
 
          Systems with fully functional boot loaders (i.e. non-embedded)
          should leave this option set to 'N'.
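
KASAN_SHADOW_OFFSET pins down the address-to-shadow translation that both the compiler instrumentation and the kernel runtime assume: one shadow byte describes eight bytes of address space. Worked through for the conventional x86-64 layout (exact region bounds depend on configuration):

    /* shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET
     *
     * e.g. addr = 0xffff880000000000 (start of the direct mapping):
     *   0xffff880000000000 >> 3   = 0x1ffff10000000000
     *   + 0xdffffc0000000000      = 0xffffed0000000000
     * which falls inside the reserved shadow region.
     */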
index 99efebb2f69df5fd5a9d9fdf1a37c6a15ac38dd1..ca3ce9ab9385336cec07258c87d05c6eb5580740 100644 (file)
@@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
 DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
 
 extern void init_espfix_bsp(void);
-extern void init_espfix_ap(void);
+extern void init_espfix_ap(int cpu);
 
 #endif /* CONFIG_X86_64 */
 
index 8b22422fbad8ad97cd7affcabc432a72e5cda883..74a2a8dc99089ddbcefb833e671f6a5f6d1ed1e0 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
 #ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
 void __init kasan_init(void);
 #else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
 #endif
 
index 28eba2d38b1570c77f3b82543d6ee84686333027..f813261d97405710c99cd0982d047a227a4c4fd3 100644 (file)
@@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu)
        int irq, vector;
        struct apic_chip_data *data;
 
-       /*
-        * vector_lock will make sure that we don't run into irq vector
-        * assignments that might be happening on another cpu in parallel,
-        * while we setup our initial vector to irq mappings.
-        */
-       raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_active_irq(irq) {
                data = apic_chip_data(irq_get_irq_data(irq));
@@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu)
                if (!cpumask_test_cpu(cpu, data->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
        }
-       raw_spin_unlock(&vector_lock);
 }
 
 /*
- * Setup the vector to irq mappings.
+ * Setup the vector to irq mappings. Must be called with vector_lock held.
  */
 void setup_vector_irq(int cpu)
 {
        int irq;
 
+       lockdep_assert_held(&vector_lock);
        /*
         * On most of the platforms, legacy PIC delivers the interrupts on the
         * boot cpu. But there are certain platforms where PIC interrupts are
index 89427d8d4fc53addcb429b994f02de7d0bb7dfa4..eec40f595ab97ee74b83002a21e09a0d2c032cbb 100644 (file)
@@ -175,7 +175,9 @@ static __init void early_serial_init(char *s)
        }
 
        if (*s) {
-               if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
+               baud = simple_strtoull(s, &e, 0);
+
+               if (baud == 0 || s == e)
                        baud = DEFAULT_BAUD;
        }
 
index f5d0730e7b084beddefb0f4b8ff6755b9166535a..ce95676abd603d2590e258374931b9ad72305f29 100644 (file)
@@ -131,25 +131,24 @@ void __init init_espfix_bsp(void)
        init_espfix_random();
 
        /* The rest is the same as for any other processor */
-       init_espfix_ap();
+       init_espfix_ap(0);
 }
 
-void init_espfix_ap(void)
+void init_espfix_ap(int cpu)
 {
-       unsigned int cpu, page;
+       unsigned int page;
        unsigned long addr;
        pud_t pud, *pud_p;
        pmd_t pmd, *pmd_p;
        pte_t pte, *pte_p;
-       int n;
+       int n, node;
        void *stack_page;
        pteval_t ptemask;
 
        /* We only have to do this once... */
-       if (likely(this_cpu_read(espfix_stack)))
+       if (likely(per_cpu(espfix_stack, cpu)))
                return;         /* Already initialized */
 
-       cpu = smp_processor_id();
        addr = espfix_base_addr(cpu);
        page = cpu/ESPFIX_STACKS_PER_PAGE;
 
@@ -165,12 +164,15 @@ void init_espfix_ap(void)
        if (stack_page)
                goto unlock_done;
 
+       node = cpu_to_node(cpu);
        ptemask = __supported_pte_mask;
 
        pud_p = &espfix_pud_page[pud_index(addr)];
        pud = *pud_p;
        if (!pud_present(pud)) {
-               pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+               struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+               pmd_p = (pmd_t *)page_address(page);
                pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
                paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PUD_CLONES; n++)
@@ -180,7 +182,9 @@ void init_espfix_ap(void)
        pmd_p = pmd_offset(&pud, addr);
        pmd = *pmd_p;
        if (!pmd_present(pmd)) {
-               pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+               struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+               pte_p = (pte_t *)page_address(page);
                pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
                paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PMD_CLONES; n++)
@@ -188,7 +192,7 @@ void init_espfix_ap(void)
        }
 
        pte_p = pte_offset_kernel(&pmd, addr);
-       stack_page = (void *)__get_free_page(GFP_KERNEL);
+       stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
        pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
        for (n = 0; n < ESPFIX_PTE_CLONES; n++)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
@@ -199,7 +203,7 @@ void init_espfix_ap(void)
 unlock_done:
        mutex_unlock(&espfix_init_mutex);
 done:
-       this_cpu_write(espfix_stack, addr);
-       this_cpu_write(espfix_waddr, (unsigned long)stack_page
-                      + (addr & ~PAGE_MASK));
+       per_cpu(espfix_stack, cpu) = addr;
+       per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
+                                     + (addr & ~PAGE_MASK);
 }
index 5a4668136e9892b6b8695d1d82edf86afbdea0a0..f129a9af635747ce616e354a43f30297f3fecabf 100644 (file)
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        /* Kill off the identity-map trampoline */
        reset_early_page_tables();
 
-       kasan_map_early_shadow(early_level4_pgt);
-
-       /* clear bss before set_intr_gate with early_idt_handler */
        clear_bss();
 
+       clear_page(init_level4_pgt);
+
+       kasan_early_init();
+
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
                set_intr_gate(i, early_idt_handler_array[i]);
        load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
         */
        load_ucode_bsp();
 
-       clear_page(init_level4_pgt);
        /* set init_level4_pgt kernel high mapping*/
        init_level4_pgt[511] = early_level4_pgt[511];
 
-       kasan_map_early_shadow(init_level4_pgt);
-
        x86_64_start_reservations(real_mode_data);
 }
 
index e5c27f729a3840b2755533cb597fbf702de07621..1d40ca8a73f27231e49c35704794d0cbe6ca94dc 100644 (file)
@@ -516,38 +516,9 @@ ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
 
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT)                               \
-       .rept (COUNT) ;                                 \
-       .quad   (VAL) ;                                 \
-       .endr
-
-NEXT_PAGE(kasan_zero_pte)
-       FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
-       FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
-       FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
 #include "../../x86/xen/xen-head.S"
        
        __PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
        .skip PAGE_SIZE
 
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
-       .skip PAGE_SIZE
-#endif
index 88b366487b0e44b613e83d412febfad74381ca92..c7dfe1be784e2c6ef672c14454d24102f24b18e9 100644 (file)
@@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void)
                        if (!desc)
                                continue;
 
+                       /*
+                        * Protect against concurrent action removal,
+                        * affinity changes etc.
+                        */
+                       raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpumask_clear_cpu(this_cpu, &affinity_new);
 
                        /* Do not count inactive or per-cpu irqs. */
-                       if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+                       if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+                               raw_spin_unlock(&desc->lock);
                                continue;
+                       }
 
+                       raw_spin_unlock(&desc->lock);
                        /*
                         * A single irq may be mapped to multiple
                         * cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void)
                 * vector. If the vector is marked in the used vectors
                 * bitmap or an irq is assigned to it, we don't count
                 * it as available.
+                *
+                * As this is an inaccurate snapshot anyway, we can do
+                * this w/o holding vector_lock.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
@@ -486,6 +497,11 @@ void fixup_irqs(void)
         */
        mdelay(1);
 
+       /*
+        * We can walk the vector array of this cpu without holding
+        * vector_lock because the cpu is already marked !online, so
+        * nothing else will touch it.
+        */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;
 
@@ -497,9 +513,9 @@ void fixup_irqs(void)
                        irq = __this_cpu_read(vector_irq[vector]);
 
                        desc = irq_to_desc(irq);
+                       raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
-                       raw_spin_lock(&desc->lock);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
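
The point of the first hunk is that irq_data fields such as the affinity mask can change concurrently, so they must be sampled under desc->lock. A minimal sketch of that snapshot pattern (hypothetical function; assumes the 4.2-era layout where irq_data carries an affinity pointer):

#include <linux/irq.h>
#include <linux/cpumask.h>

static void demo_snapshot_affinity(struct irq_desc *desc, struct cpumask *out)
{
        struct irq_data *data;

        raw_spin_lock(&desc->lock);
        data = irq_desc_get_irq_data(desc);
        cpumask_copy(out, data->affinity);
        raw_spin_unlock(&desc->lock);
}
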
index 8add66b22f333cbc5e8936b41e64525b7f49e1bc..d3010aa79dafaff14e74b140e12daa2756f03160 100644 (file)
@@ -170,11 +170,6 @@ static void smp_callin(void)
         */
        apic_ap_setup();
 
-       /*
-        * Need to setup vector mappings before we enable interrupts.
-        */
-       setup_vector_irq(smp_processor_id());
-
        /*
         * Save our processor parameters. Note: this information
         * is needed for clock calibration.
@@ -239,18 +234,13 @@ static void notrace start_secondary(void *unused)
        check_tsc_sync_target();
 
        /*
-        * Enable the espfix hack for this CPU
-        */
-#ifdef CONFIG_X86_ESPFIX64
-       init_espfix_ap();
-#endif
-
-       /*
-        * We need to hold vector_lock so there the set of online cpus
-        * does not change while we are assigning vectors to cpus.  Holding
-        * this lock ensures we don't half assign or remove an irq from a cpu.
+        * Lock vector_lock and initialize the vectors on this cpu
+        * before setting the cpu online. We must set it online with
+        * vector_lock held to prevent a concurrent setup/teardown
+        * from seeing a half valid vector space.
         */
        lock_vector_lock();
+       setup_vector_irq(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        cpu_set_state_online(smp_processor_id());
@@ -854,6 +844,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        initial_code = (unsigned long)start_secondary;
        stack_start  = idle->thread.sp;
 
+       /*
+        * Enable the espfix hack for this CPU
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       init_espfix_ap(cpu);
+#endif
+
        /* So we see what's up */
        announce_cpu(cpu, apicid);
 
index 505449700e0cf4e66ea6284135482ac172fe756a..7437b41f6a47c290e0f26b8933b45e469235ccc4 100644 (file)
@@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void)
                        if (!pit_expect_msb(0xff-i, &delta, &d2))
                                break;
 
+                       delta -= tsc;
+
+                       /*
+                        * Extrapolate the error and fail fast if the error will
+                        * never be below 500 ppm.
+                        */
+                       if (i == 1 &&
+                           d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
+                               return 0;
+
                        /*
                         * Iterate until the error is less than 500 ppm
                         */
-                       delta -= tsc;
                        if (d1+d2 >= delta >> 11)
                                continue;
 
index ddf9ecb53cc3e23171ea889dc47339e67ba5d2c8..e342586db6e4b08230ce6a596c91e27bb5fcf2db 100644 (file)
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        unsigned long ret;
 
        if (__range_not_ok(from, n, TASK_SIZE))
-               return 0;
+               return n;
 
        /*
         * Even though this function is typically called from NMI/IRQ context
index 4860906c6b9f8e751b39ad3d5d4874c6da92c2e3..e1840f3db5b583984e53bcc7d11787706dbbed26 100644 (file)
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages because stack instrumentation could write some
+ * garbage to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 static int __init map_range(struct range *range)
 {
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
                pgd_clear(pgd_offset_k(start));
 }
 
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
        int i;
        unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
 };
 #endif
 
+void __init kasan_early_init(void)
+{
+       int i;
+       pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+       pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+       pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               kasan_zero_pte[i] = __pte(pte_val);
+
+       for (i = 0; i < PTRS_PER_PMD; i++)
+               kasan_zero_pmd[i] = __pmd(pmd_val);
+
+       for (i = 0; i < PTRS_PER_PUD; i++)
+               kasan_zero_pud[i] = __pud(pud_val);
+
+       kasan_map_early_shadow(early_level4_pgt);
+       kasan_map_early_shadow(init_level4_pgt);
+}
+
 void __init kasan_init(void)
 {
        int i;
@@ -176,6 +209,7 @@ void __init kasan_init(void)
 
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
+       __flush_tlb_all();
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -202,5 +236,8 @@ void __init kasan_init(void)
        memset(kasan_zero_page, 0, PAGE_SIZE);
 
        load_cr3(init_level4_pgt);
+       __flush_tlb_all();
        init_task.kasan_depth = 0;
+
+       pr_info("Kernel address sanitizer initialized\n");
 }
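
Four statically allocated pages (one PTE, PMD, and PUD table plus the zero page itself) can back the entire early shadow because of fan-out: each 4 KiB table holds 512 entries that all alias the same lower level. A runnable coverage calculation under the usual x86-64 4 KiB / 512-entry assumptions:

#include <stdio.h>

int main(void)
{
        unsigned long long page = 4096ULL, entries = 512ULL;
        unsigned long long pte_span = entries * page;     /* 2 MiB   */
        unsigned long long pmd_span = entries * pte_span; /* 1 GiB   */
        unsigned long long pud_span = entries * pmd_span; /* 512 GiB */

        printf("one PTE table spans %llu MiB\n", pte_span >> 20);
        printf("one PMD table spans %llu GiB\n", pmd_span >> 30);
        printf("one PUD table spans %llu GiB\n", pud_span >> 30);
        return 0;
}
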
index 569ee090343fee43c318f7a1bafee0eadb65e1f6..46b58abb08c5e05efb9111f76e788a3601f6046c 100644 (file)
@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
                                pdata->mmio_size = resource_size(rentry->res);
                        pdata->mmio_base = ioremap(rentry->res->start,
                                                   pdata->mmio_size);
-                       if (!pdata->mmio_base)
-                               goto err_out;
                        break;
                }
 
        acpi_dev_free_resource_list(&resource_list);
 
+       if (!pdata->mmio_base) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
        pdata->dev_desc = dev_desc;
 
        if (dev_desc->setup)
index 2161fa178c8d07a5a66d605ca8dbb6a42beea4db..628a42c41ab126d12997ab02fd805df6af698c98 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/acpi.h>
 #include <linux/sort.h>
+#include <linux/pmem.h>
 #include <linux/io.h>
 #include "nfit.h"
 
@@ -305,6 +306,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
        return true;
 }
 
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+               struct acpi_nfit_flush_address *flush)
+{
+       struct device *dev = acpi_desc->dev;
+       struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+                       GFP_KERNEL);
+
+       if (!nfit_flush)
+               return false;
+       INIT_LIST_HEAD(&nfit_flush->list);
+       nfit_flush->flush = flush;
+       list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+       dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+                       flush->device_handle, flush->hint_count);
+       return true;
+}
+
 static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
                const void *end)
 {
@@ -338,7 +356,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
                        return err;
                break;
        case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
-               dev_dbg(dev, "%s: flush\n", __func__);
+               if (!add_flush(acpi_desc, table))
+                       return err;
                break;
        case ACPI_NFIT_TYPE_SMBIOS:
                dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 {
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
+       struct nfit_flush *nfit_flush;
        struct nfit_dcr *nfit_dcr;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
                        nfit_mem->idt_bdw = nfit_idt->idt;
                        break;
                }
+
+               list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+                       if (nfit_flush->flush->device_handle !=
+                                       nfit_memdev->memdev->device_handle)
+                               continue;
+                       nfit_mem->nfit_flush = nfit_flush;
+                       break;
+               }
                break;
        }
 
@@ -978,6 +1006,24 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
        return mmio->base_offset + line_offset + table_offset + sub_line_offset;
 }
 
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+       if (nfit_blk->nvdimm_flush) {
+               /*
+                * The first wmb() is needed to 'sfence' all previous writes
+                * such that they are architecturally visible for the platform
+                * buffer flush.  Note that we've already arranged for pmem
+                * writes to avoid the cache via arch_memcpy_to_pmem().  The
+                * final wmb() ensures ordering for the NVDIMM flush write.
+                */
+               wmb();
+               writeq(1, nfit_blk->nvdimm_flush);
+               wmb();
+       } else
+               wmb_pmem();
+}
+
 static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
                offset = to_interleave_offset(offset, mmio);
 
        writeq(cmd, mmio->base + offset);
-       /* FIXME: conditionally perform read-back if mandated by firmware */
+       wmb_blk(nfit_blk);
+
+       if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+               readq(mmio->base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 
        base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
                + lane * mmio->size;
-       /* TODO: non-temporal access, flush hints, cache management etc... */
        write_blk_ctl(nfit_blk, lane, dpa, len, rw);
        while (len) {
                unsigned int c;
@@ -1045,13 +1093,19 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
                }
 
                if (rw)
-                       memcpy(mmio->aperture + offset, iobuf + copied, c);
+                       memcpy_to_pmem(mmio->aperture + offset,
+                                       iobuf + copied, c);
                else
-                       memcpy(iobuf + copied, mmio->aperture + offset, c);
+                       memcpy_from_pmem(iobuf + copied,
+                                       mmio->aperture + offset, c);
 
                copied += c;
                len -= c;
        }
+
+       if (rw)
+               wmb_blk(nfit_blk);
+
        rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
        return rc;
 }
@@ -1124,7 +1178,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
 }
 
 static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-               struct acpi_nfit_system_address *spa)
+               struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
        resource_size_t start = spa->address;
        resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
        if (!res)
                goto err_mem;
 
-       /* TODO: cacheability based on the spa type */
-       spa_map->iomem = ioremap_nocache(start, n);
+       if (type == SPA_MAP_APERTURE) {
+               /*
+                * TODO: memremap_pmem() support, but that requires cache
+                * flushing when the aperture is moved.
+                */
+               spa_map->iomem = ioremap_wc(start, n);
+       } else
+               spa_map->iomem = ioremap_nocache(start, n);
+
        if (!spa_map->iomem)
                goto err_map;
 
@@ -1171,6 +1232,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
  * @nvdimm_bus: NFIT-bus that provided the spa table entry
  * @nfit_spa: spa table to map
+ * @type: aperture or control region
  *
  * In the case where block-data-window apertures and
  * dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * unbound.
  */
 static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-               struct acpi_nfit_system_address *spa)
+               struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
        void __iomem *iomem;
 
        mutex_lock(&acpi_desc->spa_map_mutex);
-       iomem = __nfit_spa_map(acpi_desc, spa);
+       iomem = __nfit_spa_map(acpi_desc, spa, type);
        mutex_unlock(&acpi_desc->spa_map_mutex);
 
        return iomem;
@@ -1206,12 +1268,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
        return 0;
 }
 
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+               struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+       struct nd_cmd_dimm_flags flags;
+       int rc;
+
+       memset(&flags, 0, sizeof(flags));
+       rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+                       sizeof(flags));
+
+       if (rc >= 0 && flags.status == 0)
+               nfit_blk->dimm_flags = flags.flags;
+       else if (rc == -ENOTTY) {
+               /* fall back to a conservative default */
+               nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+               rc = 0;
+       } else
+               rc = -ENXIO;
+
+       return rc;
+}
+
 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
                struct device *dev)
 {
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+       struct nfit_flush *nfit_flush;
        struct nfit_blk_mmio *mmio;
        struct nfit_blk *nfit_blk;
        struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
                dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
                                nfit_mem ? "" : " nfit_mem",
-                               nfit_mem->dcr ? "" : " dcr",
-                               nfit_mem->bdw ? "" : " bdw");
+                               (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+                               (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
                return -ENXIO;
        }
 
@@ -1237,7 +1322,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        /* map block aperture memory */
        nfit_blk->bdw_offset = nfit_mem->bdw->offset;
        mmio = &nfit_blk->mmio[BDW];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+                       SPA_MAP_APERTURE);
        if (!mmio->base) {
                dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
                                nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
        nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
        mmio = &nfit_blk->mmio[DCR];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+                       SPA_MAP_CONTROL);
        if (!mmio->base) {
                dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
                                nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
                return rc;
        }
 
+       rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+       if (rc < 0) {
+               dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
+                               __func__, nvdimm_name(nvdimm));
+               return rc;
+       }
+
+       nfit_flush = nfit_mem->nfit_flush;
+       if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+               nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+                               nfit_flush->flush->hint_address[0], 8);
+               if (!nfit_blk->nvdimm_flush)
+                       return -ENOMEM;
+       }
+
+       if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+               dev_warn(dev, "unable to guarantee persistence of writes\n");
+
        if (mmio->line_size == 0)
                return 0;
 
@@ -1459,6 +1564,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
        INIT_LIST_HEAD(&acpi_desc->dcrs);
        INIT_LIST_HEAD(&acpi_desc->bdws);
        INIT_LIST_HEAD(&acpi_desc->idts);
+       INIT_LIST_HEAD(&acpi_desc->flushes);
        INIT_LIST_HEAD(&acpi_desc->memdevs);
        INIT_LIST_HEAD(&acpi_desc->dimms);
        mutex_init(&acpi_desc->spa_map_mutex);
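
wmb_blk() is the heart of the persistence change here: fence prior stores, poke the flush-hint register, then fence the hint write itself. A hedged kernel-style sketch of just that ordering (hypothetical helper, assuming a 64-bit build where writeq() exists; the real function also falls back to wmb_pmem() when no hint address is available):

#include <linux/io.h>

static void demo_flush_hint(void __iomem *hint)
{
        wmb();           /* make all prior stores visible to the platform */
        writeq(1, hint); /* kick the NVDIMM flush via its hint address */
        wmb();           /* order the hint write against anything later */
}
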
index 81f2e8c5a79c724b7c66970698e6ca8a3f69520f..79b6d83875c1de1aed4bc67e091a4d2e3aa88204 100644 (file)
@@ -40,6 +40,10 @@ enum nfit_uuids {
        NFIT_UUID_MAX,
 };
 
+enum {
+       ND_BLK_DCR_LATCH = 2,
+};
+
 struct nfit_spa {
        struct acpi_nfit_system_address *spa;
        struct list_head list;
@@ -60,6 +64,11 @@ struct nfit_idt {
        struct list_head list;
 };
 
+struct nfit_flush {
+       struct acpi_nfit_flush_address *flush;
+       struct list_head list;
+};
+
 struct nfit_memdev {
        struct acpi_nfit_memory_map *memdev;
        struct list_head list;
@@ -77,6 +86,7 @@ struct nfit_mem {
        struct acpi_nfit_system_address *spa_bdw;
        struct acpi_nfit_interleave *idt_dcr;
        struct acpi_nfit_interleave *idt_bdw;
+       struct nfit_flush *nfit_flush;
        struct list_head list;
        struct acpi_device *adev;
        unsigned long dsm_mask;
@@ -88,6 +98,7 @@ struct acpi_nfit_desc {
        struct mutex spa_map_mutex;
        struct list_head spa_maps;
        struct list_head memdevs;
+       struct list_head flushes;
        struct list_head dimms;
        struct list_head spas;
        struct list_head dcrs;
@@ -109,7 +120,7 @@ struct nfit_blk {
        struct nfit_blk_mmio {
                union {
                        void __iomem *base;
-                       void *aperture;
+                       void __pmem  *aperture;
                };
                u64 size;
                u64 base_offset;
@@ -123,6 +134,13 @@ struct nfit_blk {
        u64 bdw_offset; /* post interleave offset */
        u64 stat_offset;
        u64 cmd_offset;
+       void __iomem *nvdimm_flush;
+       u32 dimm_flags;
+};
+
+enum spa_map_type {
+       SPA_MAP_CONTROL,
+       SPA_MAP_APERTURE,
 };
 
 struct nfit_spa_mapping {
index c262e4acd68d827cba1273d79e28515c6ebe95fa..3b8963f21b36702c2ed84970908672a4ab21c9f9 100644 (file)
@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
        if (!addr || !length)
                return;
 
-       acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+       /* Resources are never freed */
+       if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+               request_region(addr, length, desc);
+       else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               request_mem_region(addr, length, desc);
 }
 
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
 {
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+       return 0;
 }
+fs_initcall_sync(acpi_reserve_resources);
 
 void acpi_os_printf(const char *fmt, ...)
 {
@@ -1862,7 +1869,6 @@ acpi_status __init acpi_os_initialize(void)
 
 acpi_status __init acpi_os_initialize1(void)
 {
-       acpi_reserve_resources();
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
index 10561ce16ed135165cdbc8e558cb2aaea263d13b..8244f013f21095a9508e80ef01621e0ffbaab106 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/ioport.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 
 #ifdef CONFIG_X86
@@ -622,164 +621,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
        return (type & types) ? 0 : 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
-       struct list_head node;
-       u64 start;
-       u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
-                        char *desc)
-{
-       unsigned int length = end - start + 1;
-       struct resource *res;
-
-       res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
-               request_region(start, length, desc) :
-               request_mem_region(start, length, desc);
-       if (!res)
-               return -EIO;
-
-       res->flags &= ~flags;
-       return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
-                            unsigned long flags, char *desc,
-                            struct list_head *head)
-{
-       struct reserved_region *reg;
-       int error;
-
-       reg = kmalloc(sizeof(*reg), GFP_KERNEL);
-       if (!reg)
-               return -ENOMEM;
-
-       error = request_range(start, end, space_id, flags, desc);
-       if (error) {
-               kfree(reg);
-               return error;
-       }
-
-       reg->start = start;
-       reg->end = end;
-       list_add_tail(&reg->node, head);
-       return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it.  If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem.  It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-                       unsigned long flags, char *desc)
-{
-       struct list_head *regions;
-       struct reserved_region *reg;
-       u64 end = start + length - 1;
-       int ret = 0, error = 0;
-
-       if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-               regions = &reserved_io_regions;
-       else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               regions = &reserved_mem_regions;
-       else
-               return -EINVAL;
-
-       if (list_empty(regions))
-               return add_region_before(start, end, space_id, flags, desc, regions);
-
-       list_for_each_entry(reg, regions, node)
-               if (reg->start == end + 1) {
-                       /* The new region can be prepended to this one. */
-                       ret = request_range(start, end, space_id, flags, desc);
-                       if (!ret)
-                               reg->start = start;
-
-                       return ret;
-               } else if (reg->start > end) {
-                       /* No overlap.  Add the new region here and get out. */
-                       return add_region_before(start, end, space_id, flags,
-                                                desc, &reg->node);
-               } else if (reg->end == start - 1) {
-                       goto combine;
-               } else if (reg->end >= start) {
-                       goto overlap;
-               }
-
-       /* The new region goes after the last existing one. */
-       return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
-       /*
-        * The new region overlaps an existing one.
-        *
-        * The head part of the new region immediately preceding the existing
-        * overlapping one can be combined with it right away.
-        */
-       if (reg->start > start) {
-               error = request_range(start, reg->start - 1, space_id, flags, desc);
-               if (error)
-                       ret = error;
-               else
-                       reg->start = start;
-       }
-
- combine:
-       /*
-        * The new region is adjacent to an existing one.  If it extends beyond
-        * that region all the way to the next one, it is possible to combine
-        * all three of them.
-        */
-       while (reg->end < end) {
-               struct reserved_region *next = NULL;
-               u64 a = reg->end + 1, b = end;
-
-               if (!list_is_last(&reg->node, regions)) {
-                       next = list_next_entry(reg, node);
-                       if (next->start <= end)
-                               b = next->start - 1;
-               }
-               error = request_range(a, b, space_id, flags, desc);
-               if (!error) {
-                       if (next && next->start == b + 1) {
-                               reg->end = next->end;
-                               list_del(&next->node);
-                               kfree(next);
-                       } else {
-                               reg->end = end;
-                               break;
-                       }
-               } else if (next) {
-                       if (!ret)
-                               ret = error;
-
-                       reg = next;
-               } else {
-                       break;
-               }
-       }
-
-       return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
index 2649a068671d3914d5f1eae97e4d91b1c2c56ed1..ec256352f42367375bd3375ca7cad62cbd50bab7 100644 (file)
@@ -1019,6 +1019,29 @@ static bool acpi_of_match_device(struct acpi_device *adev,
        return false;
 }
 
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+                                   struct acpi_hardware_id *hwid)
+{
+       int i, msk, byte_shift;
+       char buf[3];
+
+       if (!id->cls)
+               return false;
+
+       /* Apply class-code bitmask, before checking each class-code byte */
+       for (i = 1; i <= 3; i++) {
+               byte_shift = 8 * (3 - i);
+               msk = (id->cls_msk >> byte_shift) & 0xFF;
+               if (!msk)
+                       continue;
+
+               sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+               if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+                       return false;
+       }
+       return true;
+}
+
 static const struct acpi_device_id *__acpi_match_device(
        struct acpi_device *device,
        const struct acpi_device_id *ids,
@@ -1036,9 +1059,12 @@ static const struct acpi_device_id *__acpi_match_device(
 
        list_for_each_entry(hwid, &device->pnp.ids, list) {
                /* First, check the ACPI/PNP IDs provided by the caller. */
-               for (id = ids; id->id[0]; id++)
-                       if (!strcmp((char *) id->id, hwid->id))
+               for (id = ids; id->id[0] || id->cls; id++) {
+                       if (id->id[0] && !strcmp((char *) id->id, hwid->id))
                                return id;
+                       else if (id->cls && __acpi_match_device_cls(id, hwid))
+                               return id;
+               }
 
                /*
                 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
@@ -2101,6 +2127,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                if (info->valid & ACPI_VALID_UID)
                        pnp->unique_id = kstrdup(info->unique_id.string,
                                                        GFP_KERNEL);
+               if (info->valid & ACPI_VALID_CLS)
+                       acpi_add_id(pnp, info->class_code.string);
 
                kfree(info);
 
index 6d17a3b65ef78c320200acc04ec404b739637442..15e40ee62a94e0c0ce20b07c7d4ff1ce03f78f6b 100644 (file)
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
 
 config ATA_ACPI
        bool "ATA ACPI Support"
-       depends on ACPI && PCI
+       depends on ACPI
        default y
        help
          This option adds support for ATA-related ACPI objects.
index 614c78f510f049286f491d78ec9f3d395300a6f2..1befb114c3844c507939fe82924d60340edce15b 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/platform_device.h>
 #include <linux/libata.h>
 #include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
 #include "ahci.h"
 
 #define DRV_NAME "ahci"
@@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
 
+static const struct acpi_device_id ahci_acpi_match[] = {
+       { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
 static struct platform_driver ahci_driver = {
        .probe = ahci_probe,
        .remove = ata_platform_remove_one,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = ahci_of_match,
+               .acpi_match_table = ahci_acpi_match,
                .pm = &ahci_pm_ops,
        },
 };
index 9c4288362a8eaab0825d489c6362ceb573cbad30..894bda114224d667077c25c898025b1af83c590a 100644 (file)
@@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev)
        kfree(fw_priv);
 }
 
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
 {
-       struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
        if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
                return -ENOMEM;
        if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct firmware_priv *fw_priv = to_firmware_priv(dev);
+       int err = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_priv->buf)
+               err = do_firmware_uevent(fw_priv, env);
+       mutex_unlock(&fw_lock);
+       return err;
+}
+
 static struct class firmware_class = {
        .name           = "firmware",
        .class_attrs    = firmware_class_attrs,
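
The firmware_uevent() change is a standard check-under-lock: the buffer pointer may be freed by a concurrent teardown, so the wrapper takes fw_lock, re-validates the pointer, and only then does the work. A generic sketch of that shape, with hypothetical demo_* types:

#include <linux/mutex.h>

struct demo_buf;                        /* opaque payload */

struct demo_obj {
        struct demo_buf *buf;           /* freed by a concurrent teardown */
};

static DEFINE_MUTEX(demo_lock);

static int do_demo_event(struct demo_obj *obj)
{
        /* ... touch obj->buf safely: demo_lock is held ... */
        return 0;
}

static int demo_event(struct demo_obj *obj)
{
        int err = 0;

        mutex_lock(&demo_lock);
        if (obj->buf)                   /* re-check under the lock */
                err = do_demo_event(obj);
        mutex_unlock(&demo_lock);
        return err;
}
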
index cdd547bd67df82184dd201b4e1cf9626cf90dd2e..0ee43c1056e045bd6a402908e8a11cf9dac8f34e 100644 (file)
@@ -6,6 +6,7 @@
  * This file is released under the GPLv2.
  */
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
@@ -19,6 +20,8 @@
 #include <linux/suspend.h>
 #include <linux/export.h>
 
+#define GENPD_RETRY_MAX_MS     250             /* Approximate */
+
 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)         \
 ({                                                             \
        type (*__routine)(struct device *__d);                  \
@@ -2131,6 +2134,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
 {
        struct generic_pm_domain *pd;
+       unsigned int i;
        int ret = 0;
 
        pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +2143,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
 
        dev_dbg(dev, "removing from PM domain %s\n", pd->name);
 
-       while (1) {
+       for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = pm_genpd_remove_device(pd, dev);
                if (ret != -EAGAIN)
                        break;
+
+               mdelay(i);
                cond_resched();
        }
 
@@ -2183,6 +2189,7 @@ int genpd_dev_pm_attach(struct device *dev)
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
+       unsigned int i;
        int ret;
 
        if (!dev->of_node)
@@ -2218,10 +2225,12 @@ int genpd_dev_pm_attach(struct device *dev)
 
        dev_dbg(dev, "adding to PM domain %s\n", pd->name);
 
-       while (1) {
+       for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = pm_genpd_add_device(pd, dev);
                if (ret != -EAGAIN)
                        break;
+
+               mdelay(i);
                cond_resched();
        }
 
index 7470004ca8105d5e28c48563a0c683c0466915c0..eb6e67451decee999901493d9182ed6c9cf21bf9 100644 (file)
@@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
                return -EEXIST;
        }
 
-       dev->power.wakeirq = wirq;
-       spin_unlock_irqrestore(&dev->power.lock, flags);
-
        err = device_wakeup_attach_irq(dev, wirq);
-       if (err)
-               return err;
+       if (!err)
+               dev->power.wakeirq = wirq;
 
-       return 0;
+       spin_unlock_irqrestore(&dev->power.lock, flags);
+       return err;
 }
 
 /**
@@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
                return;
 
        spin_lock_irqsave(&dev->power.lock, flags);
+       device_wakeup_detach_irq(dev);
        dev->power.wakeirq = NULL;
        spin_unlock_irqrestore(&dev->power.lock, flags);
 
-       device_wakeup_detach_irq(dev);
        if (wirq->dedicated_irq)
                free_irq(wirq->irq, wirq);
        kfree(wirq);
index 40f71603378cb5872265c9df994e6ca8cb756ffc..51f15bc15774250a203b46c44b5682b36f450f1d 100644 (file)
@@ -281,32 +281,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
  * Attach a device wakeirq to the wakeup source so the device
  * wake IRQ can be configured automatically for suspend and
  * resume.
+ *
+ * Call under the device's power.lock lock.
  */
 int device_wakeup_attach_irq(struct device *dev,
                             struct wake_irq *wakeirq)
 {
        struct wakeup_source *ws;
-       int ret = 0;
 
-       spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
        if (!ws) {
                dev_err(dev, "forgot to call device_init_wakeup?\n");
-               ret = -EINVAL;
-               goto unlock;
+               return -EINVAL;
        }
 
-       if (ws->wakeirq) {
-               ret = -EEXIST;
-               goto unlock;
-       }
+       if (ws->wakeirq)
+               return -EEXIST;
 
        ws->wakeirq = wakeirq;
-
-unlock:
-       spin_unlock_irq(&dev->power.lock);
-
-       return ret;
+       return 0;
 }
 
 /**
@@ -314,20 +307,16 @@ unlock:
  * @dev: Device to handle
  *
  * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
  */
 void device_wakeup_detach_irq(struct device *dev)
 {
        struct wakeup_source *ws;
 
-       spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
-       if (!ws)
-               goto unlock;
-
-       ws->wakeirq = NULL;
-
-unlock:
-       spin_unlock_irq(&dev->power.lock);
+       if (ws)
+               ws->wakeirq = NULL;
 }
 
 /**
index 152dcb3f7b5f50c9897f571885029f487c6fcdf0..61566bcefa53399ab3af07611398af3f114849c7 100644 (file)
@@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
        h32mxclk->pmc = pmc;
 
        clk = clk_register(NULL, &h32mxclk->hw);
-       if (!clk)
+       if (!clk) {
+               kfree(h32mxclk);
                return;
+       }
 
        of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
index c2400456a04471186ec210d8236951d9e4780901..27dfa965cfedb020ebf66b635e0053ca58cefc1c 100644 (file)
@@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
        irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
        ret = request_irq(osc->irq, clk_main_osc_irq_handler,
                          IRQF_TRIGGER_HIGH, name, osc);
-       if (ret)
+       if (ret) {
+               kfree(osc);
                return ERR_PTR(ret);
+       }
 
        if (bypass)
                pmc_write(pmc, AT91_CKGR_MOR,
index f98eafe9b12dc92d1c68f99cd5d9cc86addfb75f..5b3ded5205a29177dde69509bd72032f7fff61aa 100644 (file)
@@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
        irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
        ret = request_irq(master->irq, clk_master_irq_handler,
                          IRQF_TRIGGER_HIGH, "clk-master", master);
-       if (ret)
+       if (ret) {
+               kfree(master);
                return ERR_PTR(ret);
+       }
 
        clk = clk_register(NULL, &master->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               free_irq(master->irq, master);
                kfree(master);
+       }
 
        return clk;
 }
index cbbe40377ad622a7f9d38aca5651916dda549e54..18b60f4895a68477a74ebe99a8b64542bd7057c9 100644 (file)
@@ -346,12 +346,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
        irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
        ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
                          id ? "clk-pllb" : "clk-plla", pll);
-       if (ret)
+       if (ret) {
+               kfree(pll);
                return ERR_PTR(ret);
+       }
 
        clk = clk_register(NULL, &pll->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               free_irq(pll->irq, pll);
                kfree(pll);
+       }
 
        return clk;
 }
index a76d03fd577be05fe86a95d51092380ab01e4eb1..58008b3e8bc175f8befb8126e99e5f55c57d2176 100644 (file)
@@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
                irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
                ret = request_irq(sys->irq, clk_system_irq_handler,
                                IRQF_TRIGGER_HIGH, name, sys);
-               if (ret)
+               if (ret) {
+                       kfree(sys);
                        return ERR_PTR(ret);
+               }
        }
 
        clk = clk_register(NULL, &sys->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               free_irq(sys->irq, sys);
                kfree(sys);
+       }
 
        return clk;
 }
index ae3263bc14765042d035bea91a476b45655aae75..30dd697b1668bea6d437655d5c7fc825cda3f52a 100644 (file)
@@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
        irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
        ret = request_irq(utmi->irq, clk_utmi_irq_handler,
                          IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
-       if (ret)
+       if (ret) {
+               kfree(utmi);
                return ERR_PTR(ret);
+       }
 
        clk = clk_register(NULL, &utmi->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               free_irq(utmi->irq, utmi);
                kfree(utmi);
+       }
 
        return clk;
 }
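
The series of at91 clk fixes above all apply the same rule: on failure, release resources in reverse order of acquisition (free_irq() before kfree(), and nothing freed that was never taken). A hedged composite sketch with hypothetical demo_* names (clk_hw init setup elided for brevity):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct demo_clk {
        struct clk_hw hw;
};

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static struct clk *demo_register(unsigned int irq, const char *name)
{
        struct demo_clk *dc;
        struct clk *clk;
        int ret;

        dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        if (!dc)
                return ERR_PTR(-ENOMEM);

        ret = request_irq(irq, demo_irq_handler, IRQF_TRIGGER_HIGH, name, dc);
        if (ret) {
                kfree(dc);              /* only the allocation is held here */
                return ERR_PTR(ret);
        }

        clk = clk_register(NULL, &dc->hw);
        if (IS_ERR(clk)) {
                free_irq(irq, dc);      /* reverse order: irq first ... */
                kfree(dc);              /* ... then the memory */
        }
        return clk;
}
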
index e19c09cd9645557602840576668fbbc4c18b099d..f630e1bbdcfee9ba2016c856ee7f3b9f449a0205 100644 (file)
@@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node,
                struct iproc_asiu_clk *asiu_clk;
                const char *clk_name;
 
-               clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
-               if (WARN_ON(!clk_name))
-                       goto err_clk_register;
-
                ret = of_property_read_string_index(node, "clock-output-names",
                                                    i, &clk_name);
                if (WARN_ON(ret))
@@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node,
 
 err_clk_register:
        for (i = 0; i < num_clks; i++)
-               kfree(asiu->clks[i].name);
+               clk_unregister(asiu->clk_data.clks[i]);
        iounmap(asiu->gate_base);
 
 err_iomap_gate:
index 46fb84bc26740dde1679d7015949977337418561..2dda4e8295a912a4ca28f091fc8a4af86f1c4662 100644 (file)
@@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
        val = readl(pll->pll_base + ctrl->ndiv_int.offset);
        ndiv_int = (val >> ctrl->ndiv_int.shift) &
                bit_mask(ctrl->ndiv_int.width);
-       ndiv = ndiv_int << ctrl->ndiv_int.shift;
+       ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
 
        if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
                        bit_mask(ctrl->ndiv_frac.width);
 
                if (ndiv_frac != 0)
-                       ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;
+                       ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
+                               ndiv_frac;
        }
 
        val = readl(pll->pll_base + ctrl->pdiv.offset);
@@ -655,10 +656,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
                memset(&init, 0, sizeof(init));
                parent_name = node->name;
 
-               clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
-               if (WARN_ON(!clk_name))
-                       goto err_clk_register;
-
                ret = of_property_read_string_index(node, "clock-output-names",
                                                    i, &clk_name);
                if (WARN_ON(ret))
@@ -690,10 +687,8 @@ void __init iproc_pll_clk_setup(struct device_node *node,
        return;
 
 err_clk_register:
-       for (i = 0; i < num_clks; i++) {
-               kfree(pll->clks[i].name);
+       for (i = 0; i < num_clks; i++)
                clk_unregister(pll->clk_data.clks[i]);
-       }
 
 err_pll_register:
        if (pll->asiu_base)
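
The (u64) casts in the recalc_rate hunk matter because ndiv_int is a 32-bit value: without the cast the shift is performed in 32 bits and high bits are silently lost before the widening assignment. A runnable demonstration of the pitfall (values are illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ndiv_int = 0x80;
        unsigned int shift = 28;

        uint64_t wrong = ndiv_int << shift;           /* 32-bit shift wraps to 0 */
        uint64_t right = (uint64_t)ndiv_int << shift; /* widened before shifting */

        printf("wrong = 0x%llx\n", (unsigned long long)wrong);
        printf("right = 0x%llx\n", (unsigned long long)right);
        return 0;
}
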
index b9b12a742970d5e195f048e571bd5cf7936694e6..3f6f7ad39490458a6e9cdcf4d944665abd34df86 100644 (file)
@@ -268,7 +268,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
        memcpy(table, stm32f42xx_gate_map, sizeof(table));
 
        /* only bits set in table can be used as indices */
-       if (WARN_ON(secondary > 8 * sizeof(table) ||
+       if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
                    0 == (table[BIT_ULL_WORD(secondary)] &
                          BIT_ULL_MASK(secondary))))
                return -EINVAL;
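
The stm32 guard had a classic off-by-one: an index equal to the bit count is already out of range, so the check must be >=, and spelling the width as BITS_PER_BYTE * sizeof(table) makes the intent explicit. A runnable illustration:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
        uint64_t table[2] = { 0 };
        unsigned int nbits = BITS_PER_BYTE * sizeof(table); /* indices 0..127 */
        unsigned int idx = nbits;                           /* one past the end */

        /* old check (idx > nbits) let 128 through; new check rejects it */
        printf("old rejects: %d, new rejects: %d\n", idx > nbits, idx >= nbits);
        return 0;
}
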
index 4b9e04cdf7e8e5d7135351db855a8dcfac5e47fb..8b6523d15fb85984f5fb73304fa21803c40b730f 100644 (file)
@@ -700,6 +700,22 @@ static const struct mtk_composite peri_clks[] __initconst = {
        MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
 };
 
+static struct clk_onecell_data *mt8173_top_clk_data __initdata;
+static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
+
+static void __init mtk_clk_enable_critical(void)
+{
+       if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
+               return;
+
+       clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+       clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
+       clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
+       clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+       clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
+       clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
+}
+
 static void __init mtk_topckgen_init(struct device_node *node)
 {
        struct clk_onecell_data *clk_data;
@@ -712,19 +728,19 @@ static void __init mtk_topckgen_init(struct device_node *node)
                return;
        }
 
-       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+       mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
 
        mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
        mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
        mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
                        &mt8173_clk_lock, clk_data);
 
-       clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
-
        r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
        if (r)
                pr_err("%s(): could not register clock provider: %d\n",
                        __func__, r);
+
+       mtk_clk_enable_critical();
 }
 CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
 
@@ -818,13 +834,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
 {
        struct clk_onecell_data *clk_data;
 
-       clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+       mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
        if (!clk_data)
                return;
 
        mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
 
-       clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+       mtk_clk_enable_critical();
 }
 CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
                mtk_apmixedsys_init);
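
Both init hooks above now funnel through mtk_clk_enable_critical(), which no-ops until topckgen and apmixedsys have each stored their clk_onecell_data pointer; since the CLK_OF_DECLARE callbacks can run in either order, the critical clocks are enabled exactly once, after both providers exist. A minimal sketch of the idiom with hypothetical provider names:

    /* Two independently-probed providers; enable shared critical clocks
     * only once both have registered.
     */
    static struct clk_onecell_data *prov_a __initdata;
    static struct clk_onecell_data *prov_b __initdata;

    static void __init enable_critical_clks(void)
    {
            if (!prov_a || !prov_b)
                    return;         /* the other provider has not probed yet */

            clk_prepare_enable(prov_a->clks[0]);
            clk_prepare_enable(prov_b->clks[0]);
    }
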
index b95d17fbb8d7edb86fa6754d21b47dc6d1e66ee6..92936f0912d2f86622d5ae1372cf03a046b83979 100644 (file)
@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = *rcg->freq_tbl;
        const struct frac_entry *frac = frac_table_pixel;
-       unsigned long request, src_rate;
+       unsigned long request;
        int delta = 100000;
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 hid_div;
-       int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
-       struct clk *parent = clk_get_parent_by_index(hw->clk, index);
 
        for (; frac->num; frac++) {
                request = (rate * frac->den) / frac->num;
 
-               src_rate = __clk_round_rate(parent, request);
-               if ((src_rate < (request - delta)) ||
-                       (src_rate > (request + delta)))
+               if ((parent_rate < (request - delta)) ||
+                       (parent_rate > (request + delta)))
                        continue;
 
                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
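
The loop above walks a table of m/n fractions and, for each entry, computes the source rate that would be required (rate * den / num), accepting the entry only if the parent's rate is within the 100 kHz tolerance; using the framework-supplied parent_rate instead of re-deriving it via __clk_round_rate() avoids querying state that set_rate was already handed. A userspace sketch of the match (the table values below are hypothetical):

    #include <stdio.h>

    struct frac_entry { int num; int den; };

    static const struct frac_entry frac_table_pixel[] = {
            { 3, 8 }, { 2, 9 }, { 4, 9 }, { 1, 1 }, { 0, 0 }
    };

    int main(void)
    {
            unsigned long rate = 148500000, parent_rate = 396000000;
            long delta = 100000;
            const struct frac_entry *frac;

            for (frac = frac_table_pixel; frac->num; frac++) {
                    unsigned long request = (rate * frac->den) / frac->num;

                    if (parent_rate < request - delta ||
                        parent_rate > request + delta)
                            continue;
                    printf("m/n = %d/%d matches (request %lu Hz)\n",
                           frac->num, frac->den, request);
                    return 0;
            }
            return 1;
    }
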
index 657ca14ba709a64b97e297e74f15f8f5d751efdf..8dd8cce273617e1db85c8af512a431cef14cf328 100644 (file)
@@ -190,7 +190,7 @@ static struct clk *clk_register_flexgen(const char *name,
 
        init.name = name;
        init.ops = &flexgen_ops;
-       init.flags = CLK_IS_BASIC | flexgen_flags;
+       init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
 
@@ -303,6 +303,8 @@ static void __init st_of_flexgen_setup(struct device_node *np)
        if (!rlock)
                goto err;
 
+       spin_lock_init(rlock);
+
        for (i = 0; i < clk_data->clk_num; i++) {
                struct clk *clk;
                const char *clk_name;
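
The added spin_lock_init() closes a real gap: rlock is kzalloc'd a few lines earlier, and a zero-filled spinlock is not a validly initialized lock on all configurations (lockdep-enabled builds in particular will complain about one). The allocate-then-init pattern, as a short sketch with error handling simplified relative to the driver:

    spinlock_t *rlock;

    rlock = kzalloc(sizeof(*rlock), GFP_KERNEL);
    if (!rlock)
            return -ENOMEM;         /* the driver itself jumps to an err label */

    /* zero-fill is not initialization for locks; do it explicitly */
    spin_lock_init(rlock);
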
index e94197f04b0b3bae5a811c70e7d91393a2a263c6..d9eb2e1d84710c34abe8b656b4dd5e89a1ac7f9a 100644 (file)
@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
                    CLKGEN_FIELD(0x30c, 0xf, 20),
                    CLKGEN_FIELD(0x310, 0xf, 20) },
        .lockstatus_present = true,
-       .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+       .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
        .powerup_polarity = 1,
        .standby_polarity = 1,
        .pll_ops        = &st_quadfs_pll_c32_ops,
@@ -489,7 +489,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        u32 npda = CLKGEN_READ(pll, npda);
 
-       return !!npda;
+       return pll->data->powerup_polarity ? !npda : !!npda;
 }
 
 static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -635,7 +635,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
 
        init.name = name;
        init.ops = quadfs->pll_ops;
-       init.flags = CLK_IS_BASIC;
+       init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
        init.parent_names = &parent_name;
        init.num_parents = 1;
 
@@ -774,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);
 
-       CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+       CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
 
        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);
@@ -1082,10 +1082,6 @@ static const struct of_device_id quadfs_of_match[] = {
                .compatible = "st,stih407-quadfs660-D",
                .data = &st_fs660c32_D_407
        },
-       {
-               .compatible = "st,stih407-quadfs660-D",
-               .data = (void *)&st_fs660c32_D_407
-       },
        {}
 };
 
index 4fbe6e099587c0541ce9a22783fb2dc4e4becf8c..717c4a91a17b12643146eb028ed02dd0727eac50 100644 (file)
@@ -237,7 +237,7 @@ static struct clk *clk_register_genamux(const char *name,
 
        init.name = name;
        init.ops = &clkgena_divmux_ops;
-       init.flags = CLK_IS_BASIC;
+       init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
 
@@ -513,7 +513,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
                                          0, &clk_name))
                return;
 
-       clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
+       clk = clk_register_divider_table(NULL, clk_name, parent_name,
+                                        CLK_GET_RATE_NOCACHE,
                                         reg + data->offset, data->shift, 1,
                                         0, data->table, NULL);
        if (IS_ERR(clk))
@@ -582,7 +583,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
 };
 static struct clkgen_mux_data stih407_a9_mux_data = {
        .offset = 0x1a4,
-       .shift = 1,
+       .shift = 0,
        .width = 2,
 };
 
@@ -786,7 +787,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
                                             &mux->hw, &clk_mux_ops,
                                             &div->hw, &clk_divider_ops,
                                             &gate->hw, &clk_gate_ops,
-                                            data->clk_flags);
+                                            data->clk_flags |
+                                            CLK_GET_RATE_NOCACHE);
                if (IS_ERR(clk)) {
                        kfree(gate);
                        kfree(div);
index 1065322072136bd90c868f9d6a2fd4c56104f303..72d1c27eaffa8247a0e49d029c21d2481caa83a7 100644 (file)
@@ -406,7 +406,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
        init.name = clk_name;
        init.ops = pll_data->ops;
 
-       init.flags = CLK_IS_BASIC;
+       init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
        init.parent_names = &parent_name;
        init.num_parents  = 1;
 
index 9a82f17d2d73daf611ac5e8651e35c2101ea6245..abf7b37faf73f3dd2366024a5bb97affc152cda2 100644 (file)
@@ -1391,6 +1391,7 @@ static void __init sun6i_init_clocks(struct device_node *node)
 CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
 CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
 CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
 
 static void __init sun9i_init_clocks(struct device_node *node)
 {
index 879c784235469c4cd1a57ecfaa15fdc79ab0d289..2d59038dec43512e40ca46f4a0989c3b1253e0af 100644 (file)
@@ -529,6 +529,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
 
 CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
index fc897babab55c6ec07e0f4c02c7deb856cd34123..e362860c2b50c49ad5289169e68b8baa2a90197c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * The 2E revision of loongson processor not support this feature.
  *
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * This file is subject to the terms and conditions of the GNU General Public
index 67f80813a06f9a780e6573a7dd995f0150c47dcf..e4311ce0cd78cfc93eea1cec30a37a9ec3b54d25 100644 (file)
@@ -494,8 +494,9 @@ out:
 static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
index 2617cd4d54dd2e458e8a56de967c468a66ef2f09..dd7e9f3f5b6b2edfb0a4e1c1e442523a30d4f74b 100644 (file)
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
 
-       memcpy(nx_ctx->priv.ctr.iv,
+       memcpy(nx_ctx->priv.ctr.nonce,
               in_key + key_len - CTR_RFC3686_NONCE_SIZE,
               CTR_RFC3686_NONCE_SIZE);
 
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
                                unsigned int           nbytes)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
-       u8 *iv = nx_ctx->priv.ctr.iv;
+       u8 iv[16];
 
+       memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE,
               desc->info, CTR_RFC3686_IV_SIZE);
        iv[12] = iv[13] = iv[14] = 0;
        iv[15] = 1;
 
-       desc->info = nx_ctx->priv.ctr.iv;
+       desc->info = iv;
 
        return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
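
Keeping only the 4-byte nonce in the tfm context and assembling the complete counter block in a per-request stack buffer is what makes this path reentrant: concurrent requests on the same tfm no longer overwrite a shared IV. The RFC 3686 counter-block layout being built above, as a self-contained sketch:

    #include <stdint.h>
    #include <string.h>

    #define CTR_RFC3686_NONCE_SIZE 4
    #define CTR_RFC3686_IV_SIZE    8

    /* Initial AES-CTR counter block per RFC 3686:
     * nonce (4) || per-request IV (8) || 32-bit counter starting at 1.
     */
    static void rfc3686_counter_block(uint8_t out[16],
                                      const uint8_t *nonce, const uint8_t *iv)
    {
            memcpy(out, nonce, CTR_RFC3686_NONCE_SIZE);
            memcpy(out + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
            out[12] = out[13] = out[14] = 0;  /* big-endian block counter */
            out[15] = 1;
    }
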
index 08ac6d48688c7cf80589965b34724f374ec41b61..92c993f08213fbd767c897eae178a5129c5af899 100644 (file)
@@ -317,6 +317,7 @@ out:
 static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
@@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-       desc.info = nx_ctx->priv.gcm.iv;
+       desc.info = rctx->iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
@@ -424,8 +425,8 @@ out:
 
 static int gcm_aes_nx_encrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
index 8c2faffab4a35952c7b28bccf04c7ea5f706a471..c2f7d4befb5599fa4e7c947ba25b796c8612d24e 100644 (file)
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                           unsigned int         key_len)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+       struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 
        switch (key_len) {
        case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                return -EINVAL;
        }
 
-       memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+       memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
 
        return 0;
 }
@@ -148,32 +149,29 @@ out:
        return rc;
 }
 
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
 {
-       struct xcbc_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
-       struct nx_sg *out_sg;
-       int len;
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_AES);
+       err = nx_crypto_ctx_aes_xcbc_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_AES);
 
        NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
 
-       memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
-       memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
-       len = AES_BLOCK_SIZE;
-       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-                                 &len, nx_ctx->ap->sglen);
+       return 0;
+}
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+       struct xcbc_state *sctx = shash_desc_ctx(desc);
 
-       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+       memset(sctx, 0, sizeof *sctx);
 
        return 0;
 }
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u32 to_process = 0, leftover, total;
        unsigned int max_sg_len;
        unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
        max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
+       data_len = AES_BLOCK_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, nx_ctx->ap->sglen);
+
+       if (data_len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
        do {
                to_process = total - to_process;
                to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                                (u8 *) sctx->buffer,
                                                &data_len,
                                                max_sg_len);
-                       if (data_len != sctx->count)
-                               return -EINVAL;
+                       if (data_len != sctx->count) {
+                               rc = -EINVAL;
+                               goto out;
+                       }
                }
 
                data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                        &data_len,
                                        max_sg_len);
 
-               if (data_len != to_process - sctx->count)
-                       return -EINVAL;
+               if (data_len != to_process - sctx->count) {
+                       rc = -EINVAL;
+                       goto out;
+               }
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
                                 &len, nx_ctx->ap->sglen);
 
-       if (len != sctx->count)
-               return -EINVAL;
+       if (len != sctx->count) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+       if (len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_aes_xcbc_init,
+               .cra_init        = nx_crypto_ctx_aes_xcbc_init2,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
index 4e91bdb83c594c3491bcb183cddd4021e8d8c596..08f8d5cd633491e3ff0e28ca8204d7f51be2b05b 100644 (file)
 #include "nx.h"
 
 
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
 {
-       struct sha256_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       struct nx_sg *out_sg;
-       int len;
-       u32 max_sg_len;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
 
-       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-       max_sg_len = min_t(u64, max_sg_len,
-                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+       return 0;
+}
 
-       len = SHA256_DIGEST_SIZE;
-       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-                                 &len, max_sg_len);
-       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha256_init(struct shash_desc *desc)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
 
-       if (len != SHA256_DIGEST_SIZE)
-               return -EINVAL;
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be32(SHA256_H0);
        sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
        int rc = 0;
@@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        max_sg_len = min_t(u64, max_sg_len,
                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
+       data_len = SHA256_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA256_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
                /*
                 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
@@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha256_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
index e6a58d2ee62894e369c563f5b826d0342a6a729d..aff0fe58eac0b7aba11b465a192c280ef19fdbac 100644 (file)
 #include "nx.h"
 
 
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
 {
-       struct sha512_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       struct nx_sg *out_sg;
-       int len;
-       u32 max_sg_len;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
 
-       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-       max_sg_len = min_t(u64, max_sg_len,
-                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+       return 0;
+}
 
-       len = SHA512_DIGEST_SIZE;
-       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-                                 &len, max_sg_len);
-       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha512_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
 
-       if (len != SHA512_DIGEST_SIZE)
-               return -EINVAL;
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
@@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        max_sg_len = min_t(u64, max_sg_len,
                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
+       data_len = SHA512_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA512_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
                /*
                 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
@@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
index f6198f29a4a811b0cbcb2fb5062ece5a5d2bb421..436971343ff7732b3be14af822e29fcc922c1e0f 100644 (file)
@@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 /* entry points from the crypto tfm initializers */
 int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
 {
+       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+                               sizeof(struct nx_ccm_rctx));
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
 }
 
 int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
 {
+       crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
        return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
 }
index de3ea8738146ba100faf1ede99f5058d6d6f5824..cdff03a42ae7e883648a981779bed47bb9909c92 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __NX_H__
 #define __NX_H__
 
+#include <crypto/ctr.h>
+
 #define NX_NAME                "nx-crypto"
 #define NX_STRING      "IBM Power7+ Nest Accelerator Crypto Driver"
 #define NX_VERSION     "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
 
 #define NX_GCM4106_NONCE_LEN           (4)
 #define NX_GCM_CTR_OFFSET              (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
        u8 iv[16];
+};
+
+struct nx_gcm_priv {
        u8 iauth_tag[16];
        u8 nonce[NX_GCM4106_NONCE_LEN];
 };
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
 #define NX_CCM_AES_KEY_LEN             (16)
 #define NX_CCM4309_AES_KEY_LEN         (19)
 #define NX_CCM4309_NONCE_LEN           (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
        u8 iv[16];
+};
+
+struct nx_ccm_priv {
        u8 b0[16];
        u8 iauth_tag[16];
        u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
 };
 
 struct nx_ctr_priv {
-       u8 iv[16];
+       u8 nonce[CTR_RFC3686_NONCE_SIZE];
 };
 
 struct nx_crypto_ctx {
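
The struct split is the core of the fix: the mutable per-request IV leaves the shared priv context, where concurrent requests on one tfm would race, and moves into a request context whose size the driver declares up front. A hedged sketch of the pairing, with hypothetical function names:

    /* At tfm init time: reserve per-request state after each aead_request. */
    static int example_aead_init(struct crypto_aead *tfm)
    {
            crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
            return 0;
    }

    /* In the data path: every request gets its own private copy. */
    static int example_aead_encrypt(struct aead_request *req)
    {
            struct nx_gcm_rctx *rctx = aead_request_ctx(req);

            memcpy(rctx->iv, req->iv, 12);  /* no longer shared across requests */
            /* ... submit to hardware ... */
            return 0;
    }
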
index 46307098f8bab497734ca5dbbfb7fe2502ed082f..0a70e46d54165e96ba469088f315386b0947fab0 100644 (file)
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);
 
-       dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-       dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
        return err;
 }
 
index 975edb1000a202e3dcd2b7cbe2cc2c8916aa855f..ae43b58c9733a1962cbd6dae4ce42fb8c52fa81a 100644 (file)
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;
 
-       timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+       timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;
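
The original expression computed now - timeout_ns, the age of the deadline, instead of the time remaining until it; with the operands swapped, a deadline already in the past yields a negative ktime and the existing < 0 check correctly clamps it to 0. The same arithmetic in a plain userspace sketch:

    #include <stdint.h>

    /* Nanoseconds remaining until an absolute deadline, clamped to zero. */
    static int64_t remaining_ns(int64_t deadline_ns, int64_t now_ns)
    {
            int64_t remaining = deadline_ns - now_ns; /* deadline - now, not now - deadline */

            return remaining < 0 ? 0 : remaining;
    }
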
 
index 5cde635978f940ff250ffb22a52c77a0db38ea26..6e77964f1b640d6841ca216ab2c7f5eed73077f5 100644 (file)
@@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
 
        switch (entry->src_data) {
        case 0: /* vblank */
-               if (disp_int & interrupt_status_offsets[crtc].vblank) {
+               if (disp_int & interrupt_status_offsets[crtc].vblank)
                        dce_v10_0_crtc_vblank_int_ack(adev, crtc);
-                       if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                               drm_handle_vblank(adev->ddev, crtc);
-                       }
-                       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                       drm_handle_vblank(adev->ddev, crtc);
                }
+               DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
                break;
        case 1: /* vline */
-               if (disp_int & interrupt_status_offsets[crtc].vline) {
+               if (disp_int & interrupt_status_offsets[crtc].vline)
                        dce_v10_0_crtc_vline_int_ack(adev, crtc);
-                       DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-               }
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
                break;
        default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
index 95efd98b202d5d74e7329b009e944dd7f8bdcfab..7f7abb0e0be53026d98e074d4c0a756484eb3e1b 100644 (file)
@@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
 
        switch (entry->src_data) {
        case 0: /* vblank */
-               if (disp_int & interrupt_status_offsets[crtc].vblank) {
+               if (disp_int & interrupt_status_offsets[crtc].vblank)
                        dce_v11_0_crtc_vblank_int_ack(adev, crtc);
-                       if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                               drm_handle_vblank(adev->ddev, crtc);
-                       }
-                       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                       drm_handle_vblank(adev->ddev, crtc);
                }
+               DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
                break;
        case 1: /* vline */
-               if (disp_int & interrupt_status_offsets[crtc].vline) {
+               if (disp_int & interrupt_status_offsets[crtc].vline)
                        dce_v11_0_crtc_vline_int_ack(adev, crtc);
-                       DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-               }
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
                break;
        default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
index aaca8d663f2c60e97921e0c06a69a1c7a4549322..08387dfd98a7f008cb67dc98160bfd6eaaedadc1 100644 (file)
@@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 
        switch (entry->src_data) {
        case 0: /* vblank */
-               if (disp_int & interrupt_status_offsets[crtc].vblank) {
+               if (disp_int & interrupt_status_offsets[crtc].vblank)
                        WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
-                       if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                               drm_handle_vblank(adev->ddev, crtc);
-                       }
-                       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                       drm_handle_vblank(adev->ddev, crtc);
                }
+               DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
                break;
        case 1: /* vline */
-               if (disp_int & interrupt_status_offsets[crtc].vline) {
+               if (disp_int & interrupt_status_offsets[crtc].vline)
                        WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
-                       DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-               }
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
                break;
        default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
index 8a1f999daa2444e613f0c61960ef3bfe31f0aa82..9be007081b72a8b5c75b4baf1fe01e92bfe792bc 100644 (file)
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
        pqm_uninit(&p->pqm);
 
        pdd = kfd_get_process_device_data(dev, p);
+
+       if (!pdd) {
+               mutex_unlock(&p->mutex);
+               return;
+       }
+
        if (pdd->reset_wavefronts) {
                dbgdev_wave_reset_wavefronts(pdd->dev, p);
                pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
         * We don't call amd_iommu_unbind_pasid() here
         * because the IOMMU called us.
         */
-       if (pdd)
-               pdd->bound = false;
+       pdd->bound = false;
 
        mutex_unlock(&p->mutex);
 }
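
The hunk fixes a check-after-use bug: the old code dereferenced pdd->reset_wavefronts immediately and only tested pdd for NULL further down. The fix hoists the NULL test above the first use and releases the mutex on the early-return path, after which the later if (pdd) guard becomes redundant and is dropped. The shape of the bug, distilled (do_reset() is a stand-in):

    /* buggy ordering: use precedes the check */
    if (pdd->reset_wavefronts)      /* crashes if pdd == NULL */
            do_reset(pdd);
    if (pdd)                        /* too late to help */
            pdd->bound = false;

    /* fixed ordering: check first, unwind held locks on bail-out */
    if (!pdd) {
            mutex_unlock(&p->mutex);
            return;
    }
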
index 8867818b140173fc3c107b299a7b553a6287f8f9..d65cbe6afb92d4c1d11b06d9e144eea35f8db56b 100644 (file)
@@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        struct drm_i915_gem_object *obj;
        int ret;
 
-       obj = i915_gem_object_create_stolen(dev, size);
-       if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, size);
+       obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);
 
index 9daa2883ac186f73c64baa40ba61aadfd0a94c9a..dcc6a88c560ebaf2b788971fe36984bd7402cf92 100644 (file)
@@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
+       struct i915_vma *vma;
+       bool flush;
 
        i915_check_and_clear_faults(dev);
 
@@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                                       dev_priv->gtt.base.total,
                                       true);
 
+       /* Cache flush objects bound into GGTT and rebind them. */
+       vm = &dev_priv->gtt.base;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-                                                          &dev_priv->gtt.base);
-               if (!vma)
-                       continue;
+               flush = false;
+               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+                       if (vma->vm != vm)
+                               continue;
 
-               i915_gem_clflush_object(obj, obj->pin_display);
-               WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-       }
+                       WARN_ON(i915_vma_bind(vma, obj->cache_level,
+                                             PIN_UPDATE));
 
+                       flush = true;
+               }
+
+               if (flush)
+                       i915_gem_clflush_object(obj, obj->pin_display);
+       }
 
        if (INTEL_INFO(dev)->gen >= 8) {
                if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
index 633bd1fcab6925881048e7310f9aa40e4f9db868..d61e74a08f829b1491b61e4557c689546dce9c3f 100644 (file)
@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                if (IS_GEN4(dev)) {
                        uint32_t ddc2 = I915_READ(DCC2);
 
-                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+                               /* Since the swizzling may vary within an
+                                * object, we have no idea what the swizzling
+                                * is for any page in particular. Thus we
+                                * cannot migrate tiled pages using the GPU,
+                                * nor can we tell userspace what the exact
+                                * swizzling is for any object.
+                                */
                                dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       }
                }
 
                if (dcc == 0xffffffff) {
index 1b61f98103870171e75338595b32e5dd473523e0..647b1404c441374beba20b32be37192d0b0ace95 100644 (file)
@@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
        struct intel_plane *intel_plane;
        int pipe = intel_crtc->pipe;
 
+       if (!intel_crtc->active)
+               return;
+
        intel_crtc_wait_for_pending_flips(crtc);
 
        intel_pre_disable_primary(crtc);
@@ -7887,7 +7890,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        intel_clock_t clock;
-       u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+       u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;
 
        mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7898,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+       pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);
 
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
-       clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+       clock.m2 = (pll_dw0 & 0xff) << 22;
+       if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+               clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
index f2daad8c3d960e2eba99d55f996ba658a0a97227..7841970de48d19e10bc23a0bde4a2388b05708f5 100644 (file)
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 
        if (wait) {
                if (!wait_for_completion_timeout(&engine->compl,
-                               msecs_to_jiffies(1))) {
+                               msecs_to_jiffies(100))) {
                        dev_err(dmm->dev, "timed out waiting for done\n");
                        ret = -ETIMEDOUT;
                }
index ae2df41f216f04652b8d18817ee80e9fddd41147..12081e61d45a99b02956bda4dc5759036ef64d23 100644 (file)
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
 int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                struct omap_drm_window *win, struct omap_overlay_info *info);
 struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
                enum dma_data_direction dir);
 int omap_gem_get_paddr(struct drm_gem_object *obj,
                dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
                bool remap);
 int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
        /* PVR needs alignment to 8 pixels.. right now that is the most
         * restrictive stride requirement..
         */
-       return ALIGN(pitch, 8 * bytespp);
+       return roundup(pitch, 8 * bytespp);
 }
 
 /* map crtc to vblank mask */
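
ALIGN() rounds up by masking and is therefore only correct for power-of-two alignments, but the pitch here must be a multiple of 8 * bytespp, which is 24 for 3-byte pixels; roundup() divides and multiplies, so it handles any modulus. A compilable demonstration with kernel-equivalent macros (the pitch value is made up):

    #include <stdio.h>

    /* Kernel-style helpers: the mask form assumes a power of two. */
    #define ALIGN_POW2(x, a)  (((x) + (a) - 1) & ~((a) - 1))
    #define ROUNDUP(x, y)     ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            int pitch = 100, bytespp = 3;   /* alignment = 24: not a power of two */
            int a = 8 * bytespp;

            printf("ALIGN:   %d\n", ALIGN_POW2(pitch, a)); /* 104: not a multiple of 24 */
            printf("roundup: %d\n", ROUNDUP(pitch, a));    /* 120: correct */
            return 0;
    }
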
index 0b967e76df1a875a9f8139b6a2482e5997965412..51b1219af87f1b29fbbe641fa2fbfd51b5c3b92d 100644 (file)
@@ -287,10 +287,10 @@ fail:
 }
 
 /* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
 {
        struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       int ret, i, n = drm_format_num_planes(fb->pixel_format);
+       int i, n = drm_format_num_planes(fb->pixel_format);
 
        mutex_lock(&omap_fb->lock);
 
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
 
        if (omap_fb->pin_count > 0) {
                mutex_unlock(&omap_fb->lock);
-               return 0;
+               return;
        }
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               ret = omap_gem_put_paddr(plane->bo);
-               if (ret)
-                       goto fail;
+               omap_gem_put_paddr(plane->bo);
                plane->paddr = 0;
        }
 
        mutex_unlock(&omap_fb->lock);
-
-       return 0;
-
-fail:
-       mutex_unlock(&omap_fb->lock);
-       return ret;
 }
 
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
index 23b5a84389e3b23b0245af9146128862c571b639..720d16bce7e82b790a7aff17f5bbec94b6df18fe 100644 (file)
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
        fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
        if (fbdev->ywrap_enabled) {
                /* need to align pitch to page size if using DMM scrolling */
-               mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+               mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
        }
 
        /* allocate backing bo */
index 2ab77801cf5f070ab08bc5f8255c6126291b94b3..7ed08fdc4c4285eff29f109ce67de4b79e970de2 100644 (file)
@@ -808,10 +808,10 @@ fail:
 /* Release physical address, when DMA is no longer being performed.. this
  * could potentially unpin and unmap buffers from TILER
  */
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       int ret = 0;
+       int ret;
 
        mutex_lock(&obj->dev->struct_mutex);
        if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
                        if (ret) {
                                dev_err(obj->dev->dev,
                                        "could not unpin pages: %d\n", ret);
-                               goto fail;
                        }
                        ret = tiler_release(omap_obj->block);
                        if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
                        omap_obj->block = NULL;
                }
        }
-fail:
+
        mutex_unlock(&obj->dev->struct_mutex);
-       return ret;
 }
 
 /* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 
        omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
        if (!omap_obj)
-               goto fail;
-
-       spin_lock(&priv->list_lock);
-       list_add(&omap_obj->mm_list, &priv->obj_list);
-       spin_unlock(&priv->list_lock);
+               return NULL;
 
        obj = &omap_obj->base;
 
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                 */
                omap_obj->vaddr =  dma_alloc_writecombine(dev->dev, size,
                                &omap_obj->paddr, GFP_KERNEL);
-               if (omap_obj->vaddr)
-                       flags |= OMAP_BO_DMA;
+               if (!omap_obj->vaddr) {
+                       kfree(omap_obj);
+
+                       return NULL;
+               }
 
+               flags |= OMAP_BO_DMA;
        }
 
+       spin_lock(&priv->list_lock);
+       list_add(&omap_obj->mm_list, &priv->obj_list);
+       spin_unlock(&priv->list_lock);
+
        omap_obj->flags = flags;
 
        if (flags & OMAP_BO_TILED) {
index cfa8276c4debafe4286f424f300936bbc42f009a..098904696a5cada72fab1e4f221ce7621af197a5 100644 (file)
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
        dispc_ovl_enable(omap_plane->id, false);
 }
 
+static int omap_plane_atomic_check(struct drm_plane *plane,
+                                  struct drm_plane_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+
+       if (!state->crtc)
+               return 0;
+
+       crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
+
+       if (state->crtc_x < 0 || state->crtc_y < 0)
+               return -EINVAL;
+
+       if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+               return -EINVAL;
+
+       if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+               return -EINVAL;
+
+       return 0;
+}
+
 static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
        .prepare_fb = omap_plane_prepare_fb,
        .cleanup_fb = omap_plane_cleanup_fb,
+       .atomic_check = omap_plane_atomic_check,
        .atomic_update = omap_plane_atomic_update,
        .atomic_disable = omap_plane_atomic_disable,
 };
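
The new .atomic_check rejects any plane placement that is not fully contained in the CRTC's adjusted mode during the check phase, before the commit phase where failure could no longer be reported. Its containment test, reduced to a standalone predicate:

    #include <stdbool.h>

    /* A plane is valid only if it lies entirely within the active display. */
    static bool plane_fits_crtc(int x, int y, unsigned int w, unsigned int h,
                                unsigned int hdisplay, unsigned int vdisplay)
    {
            if (x < 0 || y < 0)
                    return false;
            return x + w <= hdisplay && y + h <= vdisplay;
    }
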
index 4ecf5caa8c6d9745f9421710179aa2f81ef3587f..248953d2fdb742d69111193f62638c0a95f179d5 100644 (file)
@@ -7964,23 +7964,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
+
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
+
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
+
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
+
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
+
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
+
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
+
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
+
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
+
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36a75cd52edecdb58928404629567a..0acde1949c18d4bcc52790754ab41f3ae74ffd6f 100644
@@ -4924,7 +4924,7 @@ restart_ih:
                return IRQ_NONE;
 
        rptr = rdev->ih.rptr;
-       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+       DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
        /* Order reading of wptr vs. reading of IH ring data */
        rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
                case 44: /* hdmi */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI0\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI0\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI1\n");
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI2\n");
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI3\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI4\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI5\n");
                                break;
                        default:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5a59d3bd8fa65937d7b652d853bb8..158872eb78e41ae94276b1abdad3fdce76ef463f 100644
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
                        DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
        }
 
-       ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-       if (ring->ring_size)
-               r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+       if (rdev->family == CHIP_ARUBA) {
+               ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+               if (ring->ring_size)
+                       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-       ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-       if (ring->ring_size)
-               r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+               ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+               if (ring->ring_size)
+                       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-       if (!r)
-               r = vce_v1_0_init(rdev);
-       else if (r != -ENOENT)
-               DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+               if (!r)
+                       r = vce_v1_0_init(rdev);
+               if (r)
+                       DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+       }
 
        r = radeon_ib_pool_init(rdev);
        if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
        radeon_irq_kms_fini(rdev);
        uvd_v1_0_fini(rdev);
        radeon_uvd_fini(rdev);
-       radeon_vce_fini(rdev);
+       if (rdev->family == CHIP_ARUBA)
+               radeon_vce_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
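
The ni.c hunks above fence VCE setup and teardown on the chip family, presumably because Aruba is the only part driven by this file that carries a VCE block, and they tighten the error path from "report unless -ENOENT" to "report any failure". The important property is that the guard is symmetric between the two paths, so cayman_fini() never tears down rings that cayman_startup() never created; condensed:

	if (rdev->family == CHIP_ARUBA) {
		/* startup path: VCE1/VCE2 ring init + vce_v1_0_init() */
	}
	/* ... */
	if (rdev->family == CHIP_ARUBA)
		radeon_vce_fini(rdev);
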
drivers/gpu/drm/radeon/r600.c
index 35dafd77a639f0fbf404671ea348049f51bfaa9c..4ea5b10ff5f412fb0fc2b161c8fe936ab154bd2f 100644
@@ -4086,23 +4086,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
                case 5: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
                case 19: /* HPD/DAC hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
                                break;
                        case 12:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
                case 21: /* hdmi */
                        switch (src_data) {
                        case 4:
-                               if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI0\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI0\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI1\n");
+
                                break;
                        default:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
index 09e3f39925faa832661e59333bed740981c10621..98f9adaccc3dadfc75c85ff5a7cbdf9994e48abc 100644 (file)
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
        struct drm_buf *buf;
        u32 *buffer;
        const u8 __user *data;
-       int size, pass_size;
+       unsigned int size, pass_size;
        u64 src_offset, dst_offset;
 
        if (!radeon_check_offset(dev_priv, tex->offset)) {
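
The int to unsigned int switch for size/pass_size is presumably defensive: both counts derive from user-supplied ioctl parameters, and a signed value that goes negative skips the copy loop's "size > 0" test while still slipping past "size > limit" style bounds checks. A minimal stand-alone illustration of that comparison pitfall (not driver code):

    #include <stdio.h>

    int main(void)
    {
            int ssize = -16;                  /* a signed count gone negative */
            unsigned int usize = (unsigned int)ssize;

            printf("%d\n", ssize > 0);        /* 0: negative, loop skipped */
            printf("%d\n", usize > 0);        /* 1: huge, but an upper-bound
                                               * check now sees the magnitude */
            return 0;
    }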
index 45e54060ee97eea33cd005b993c6ca704cf80a16..afaf346bd50e7c7628674e29d0eea14c8403e3ec 100644 (file)
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
        struct radeon_device *rdev = crtc->dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev)) {
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+                      upper_32_bits(radeon_crtc->cursor_addr));
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      lower_32_bits(radeon_crtc->cursor_addr));
                WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
                WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
                       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
                       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
        } else if (ASIC_IS_AVIVO(rdev)) {
+               if (rdev->family >= CHIP_RV770) {
+                       if (radeon_crtc->crtc_id)
+                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(radeon_crtc->cursor_addr));
+                       else
+                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(radeon_crtc->cursor_addr));
+               }
+
+               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      lower_32_bits(radeon_crtc->cursor_addr));
                WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
                WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
                       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
        } else {
+               /* offset is from DISP(2)_BASE_ADDRESS */
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+                      radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
                switch (radeon_crtc->crtc_id) {
                case 0:
                        WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
                        | (x << 16)
                        | y));
                /* offset is from DISP(2)_BASE_ADDRESS */
-               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
-                                                                     (yorigin * 256)));
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+                      radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+                      yorigin * 256);
        }
 
        radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        return ret;
 }
 
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_device *rdev = crtc->dev->dev_private;
-       struct radeon_bo *robj = gem_to_radeon_bo(obj);
-       uint64_t gpu_addr;
-       int ret;
-
-       ret = radeon_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       /* Only 27 bit offset for legacy cursor */
-       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-                                      &gpu_addr);
-       radeon_bo_unreserve(robj);
-       if (ret)
-               goto fail;
-
-       if (ASIC_IS_DCE4(rdev)) {
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-                      upper_32_bits(gpu_addr));
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
-       } else if (ASIC_IS_AVIVO(rdev)) {
-               if (rdev->family >= CHIP_RV770) {
-                       if (radeon_crtc->crtc_id)
-                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-                       else
-                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-               }
-               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
-       } else {
-               radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
-               /* offset is from DISP(2)_BASE_ADDRESS */
-               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
-       }
-
-       return 0;
-
-fail:
-       drm_gem_object_unreference_unlocked(obj);
-
-       return ret;
-}
-
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                            struct drm_file *file_priv,
                            uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                            int32_t hot_y)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
+       struct radeon_bo *robj;
        int ret;
 
        if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
+       robj = gem_to_radeon_bo(obj);
+       ret = radeon_bo_reserve(robj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+       /* Only 27 bit offset for legacy cursor */
+       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+                                      &radeon_crtc->cursor_addr);
+       radeon_bo_unreserve(robj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        radeon_crtc->cursor_width = width;
        radeon_crtc->cursor_height = height;
 
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                radeon_crtc->cursor_hot_y = hot_y;
        }
 
-       ret = radeon_set_cursor(crtc, obj);
-
-       if (ret)
-               DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
-                         ret);
-       else
-               radeon_show_cursor(crtc);
+       radeon_show_cursor(crtc);
 
        radeon_lock_cursor(crtc, false);
 
@@ -341,8 +327,7 @@ unpin:
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
-               if (radeon_crtc->cursor_bo != obj)
-                       drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+               drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
        }
 
        radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
 void radeon_cursor_reset(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       int ret;
 
        if (radeon_crtc->cursor_bo) {
                radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
                radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
                                          radeon_crtc->cursor_y);
 
-               ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
-               if (ret)
-                       DRM_ERROR("radeon_set_cursor returned %d, not showing "
-                                 "cursor\n", ret);
-               else
-                       radeon_show_cursor(crtc);
+               radeon_show_cursor(crtc);
 
                radeon_lock_cursor(crtc, false);
        }
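
Taken together, the radeon_cursor.c hunks dissolve radeon_set_cursor() into its callers: radeon_crtc_cursor_set2() pins the cursor BO exactly once and records the pinned GPU address in radeon_crtc->cursor_addr, while radeon_show_cursor() programs that address into the per-family cursor registers every time the cursor is shown, including from radeon_cursor_reset() after a modeset. A sketch of the set-time half, using only names visible in the hunks above:

    /* pin once at set-time; pre-AVIVO parts only take a 27-bit
     * offset, hence the restricted pin */
    ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
                                   ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
                                   &radeon_crtc->cursor_addr);

Show-time then becomes infallible, which is why the DRM_ERROR fallback paths in cursor_set2() and cursor_reset() go away.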
index 2593b1168bd6ff09a8dbfa966e75d7445478be70..d8319dae8358846cfc4b8121e7786dbd8c6d17fa 100644 (file)
@@ -1079,6 +1079,22 @@ static bool radeon_check_pot_argument(int arg)
        return (arg & (arg - 1)) == 0;
 }
 
+/**
+ * radeon_gart_size_auto - determine a sensible default GART size
+ * according to ASIC family
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+       /* default to a larger gart size on newer asics */
+       if (family >= CHIP_TAHITI)
+               return 2048;
+       else if (family >= CHIP_RV770)
+               return 1024;
+       else
+               return 512;
+}
+
 /**
  * radeon_check_arguments - validate module params
  *
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        }
 
        if (radeon_gart_size == -1) {
-               /* default to a larger gart size on newer asics */
-               if (rdev->family >= CHIP_RV770)
-                       radeon_gart_size = 1024;
-               else
-                       radeon_gart_size = 512;
+               radeon_gart_size = radeon_gart_size_auto(rdev->family);
        }
        /* gtt size must be power of two and greater or equal to 32M */
        if (radeon_gart_size < 32) {
                dev_warn(rdev->dev, "gart size (%d) too small\n",
                                radeon_gart_size);
-               if (rdev->family >= CHIP_RV770)
-                       radeon_gart_size = 1024;
-               else
-                       radeon_gart_size = 512;
+               radeon_gart_size = radeon_gart_size_auto(rdev->family);
        } else if (!radeon_check_pot_argument(radeon_gart_size)) {
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
-               if (rdev->family >= CHIP_RV770)
-                       radeon_gart_size = 1024;
-               else
-                       radeon_gart_size = 512;
+               radeon_gart_size = radeon_gart_size_auto(rdev->family);
        }
        rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
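
radeon_gart_size_auto() deduplicates three identical fallback sites in radeon_check_arguments() and, in passing, raises the default to 2048 MB for CHIP_TAHITI and newer. A stand-alone sketch of the helper's behaviour; the enum values here are placeholders, the real ordering comes from enum radeon_family:

    #include <stdio.h>

    enum radeon_family { CHIP_R600 = 1, CHIP_RV770 = 10, CHIP_TAHITI = 20 };

    static int radeon_gart_size_auto(enum radeon_family family)
    {
            if (family >= CHIP_TAHITI)
                    return 2048;    /* MB; newest asics */
            else if (family >= CHIP_RV770)
                    return 1024;
            else
                    return 512;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   radeon_gart_size_auto(CHIP_R600),
                   radeon_gart_size_auto(CHIP_RV770),
                   radeon_gart_size_auto(CHIP_TAHITI)); /* 512 1024 2048 */
            return 0;
    }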
 
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }
 
-       /* unpin the front buffers */
+       /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
                struct radeon_bo *robj;
 
+               if (radeon_crtc->cursor_bo) {
+                       struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+                       r = radeon_bo_reserve(robj, false);
+                       if (r == 0) {
+                               radeon_bo_unpin(robj);
+                               radeon_bo_unreserve(robj);
+                       }
+               }
+
                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_crtc *crtc;
        int r;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
        radeon_restore_bios_scratch_regs(rdev);
 
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+               if (radeon_crtc->cursor_bo) {
+                       struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+                       r = radeon_bo_reserve(robj, false);
+                       if (r == 0) {
+                               /* Only 27 bit offset for legacy cursor */
+                               r = radeon_bo_pin_restricted(robj,
+                                                            RADEON_GEM_DOMAIN_VRAM,
+                                                            ASIC_IS_AVIVO(rdev) ?
+                                                            0 : 1 << 27,
+                                                            &radeon_crtc->cursor_addr);
+                               if (r != 0)
+                                       DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                               radeon_bo_unreserve(robj);
+                       }
+               }
+       }
+
        /* init dig PHYs, disp eng pll */
        if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
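
With the cursor BO now pinned only once at set-time, suspend must unpin it explicitly and resume must pin it again, refreshing radeon_crtc->cursor_addr in case the buffer came back at a different VRAM offset. Both directions use the usual reserve, (un)pin, unreserve idiom; condensed from the resume hunk above, with error handling elided:

    struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);

    if (radeon_bo_reserve(robj, false) == 0) {
            /* Only 27 bit offset for legacy cursor */
            radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
                                     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
                                     &radeon_crtc->cursor_addr);
            radeon_bo_unreserve(robj);
    }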
index 634793ea841889847ac090c32470548a1cae418d..aeb676708e60cfb1871326bfc5a689631bb98741 100644 (file)
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
        }
 
        info->par = rfbdev;
+       info->skip_vt_switch = true;
 
        ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
        if (ret) {
index ac3c1310b953182acb0db6db41add071fd88e737..013ec7106e555a2862c4d8b7194e25eaaabc7572 100644 (file)
@@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
-       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
-       r = radeon_bo_wait(robj, &cur_placement, true);
+
+       r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+       if (r == 0)
+               r = -EBUSY;
+       else
+               r = 0;
+
+       cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
@@ -471,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                r = ret;
 
        /* Flush HDP cache via MMIO if necessary */
+       cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
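
radeon_gem_busy_ioctl() no longer goes through radeon_bo_wait(), and therefore no longer needs the rdev local or the GPU-lockup post-processing. reservation_object_test_signaled_rcu() is a lock-free query that returns true once every fence on the buffer has signaled, so its boolean result must be folded into the ioctl's 0 / -EBUSY convention; the placement is then sampled with ACCESS_ONCE() because nothing is held that would keep it stable. A self-contained sketch of the mapping (the bool stands in for the RCU fence test):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int busy_ioctl_status(bool all_fences_signaled)
    {
            return all_fences_signaled ? 0 : -EBUSY;
    }

    int main(void)
    {
            printf("%d %d\n", busy_ioctl_status(true),
                   busy_ioctl_status(false));   /* 0 -16 on Linux */
            return 0;
    }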
index 6de5459316b53c60b41449c833b15dab31c0b45a..07909d817381a21a18fdeb5dd7cd8b4be92fbc8e 100644 (file)
@@ -343,7 +343,6 @@ struct radeon_crtc {
        int max_cursor_width;
        int max_cursor_height;
        uint32_t legacy_display_base_addr;
-       uint32_t legacy_cursor_offset;
        enum radeon_rmx_type rmx_type;
        u8 h_border;
        u8 v_border;
index ec10533a49b87a905aa82eda96e48bddae32d87f..48d97c040f495cc69c0d890b5a5e6b4fee587a62 100644 (file)
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        }
 
        if (bo_va->it.start || bo_va->it.last) {
-               spin_lock(&vm->status_lock);
-               if (list_empty(&bo_va->vm_status)) {
-                       /* add a clone of the bo_va to clear the old address */
-                       struct radeon_bo_va *tmp;
-                       spin_unlock(&vm->status_lock);
-                       tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-                       if (!tmp) {
-                               mutex_unlock(&vm->mutex);
-                               r = -ENOMEM;
-                               goto error_unreserve;
-                       }
-                       tmp->it.start = bo_va->it.start;
-                       tmp->it.last = bo_va->it.last;
-                       tmp->vm = vm;
-                       tmp->bo = radeon_bo_ref(bo_va->bo);
-                       spin_lock(&vm->status_lock);
-                       list_add(&tmp->vm_status, &vm->freed);
+               /* add a clone of the bo_va to clear the old address */
+               struct radeon_bo_va *tmp;
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       r = -ENOMEM;
+                       goto error_unreserve;
                }
-               spin_unlock(&vm->status_lock);
+               tmp->it.start = bo_va->it.start;
+               tmp->it.last = bo_va->it.last;
+               tmp->vm = vm;
+               tmp->bo = radeon_bo_ref(bo_va->bo);
 
                interval_tree_remove(&bo_va->it, &vm->va);
+               spin_lock(&vm->status_lock);
                bo_va->it.start = 0;
                bo_va->it.last = 0;
+               list_del_init(&bo_va->vm_status);
+               list_add(&tmp->vm_status, &vm->freed);
+               spin_unlock(&vm->status_lock);
        }
 
        if (soffset || eoffset) {
+               spin_lock(&vm->status_lock);
                bo_va->it.start = soffset;
                bo_va->it.last = eoffset - 1;
-               interval_tree_insert(&bo_va->it, &vm->va);
-               spin_lock(&vm->status_lock);
                list_add(&bo_va->vm_status, &vm->cleared);
                spin_unlock(&vm->status_lock);
+               interval_tree_insert(&bo_va->it, &vm->va);
        }
 
        bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                spin_lock(&bo_va->vm->status_lock);
-               if (list_empty(&bo_va->vm_status))
+               if (list_empty(&bo_va->vm_status) &&
+                   (bo_va->it.start || bo_va->it.last))
                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
                spin_unlock(&bo_va->vm->status_lock);
        }
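
Two separate fixes in radeon_vm.c. In radeon_vm_bo_set_addr(), the clone that parks the old address range on the freed list is now allocated unconditionally, and before the spinlock is taken; kzalloc(GFP_KERNEL) may sleep, so it must not run under status_lock, leaving only list and interval-tree manipulation inside the critical section. In radeon_vm_bo_invalidate(), bo_vas with an empty interval (no address assigned) are no longer queued for invalidation. The allocation pattern, schematically:

    tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);  /* may sleep: allocate first */
    if (!tmp)
            return -ENOMEM;

    spin_lock(&vm->status_lock);              /* atomic section: lists only */
    list_add(&tmp->vm_status, &vm->freed);
    spin_unlock(&vm->status_lock);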
index 26388b5dd6edd1dd50c7d0fd947280f00cf3b19d..07037e32dea327f935ef39e0bdafea8fb640b39a 100644 (file)
@@ -6466,23 +6466,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
+
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
+
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
+
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
+
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
+
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
+
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
+
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
+
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
+
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
index 35ac23768ce9662fc513cdf8d0cad3f2f8447f4d..577d58d1f1a19881a23063cccf39cf5158d88520 100644 (file)
@@ -633,6 +633,7 @@ config I2C_MPC
 config I2C_MT65XX
        tristate "MediaTek I2C adapter"
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on HAS_DMA
        help
          This selects the MediaTek(R) Integrated Inter Circuit bus driver
          for MT65xx and MT81xx.
index 19b2d689a5ef72d76f2745bb3cb35dad78e61ad2..f325663c27c532645a1901a385ac6d52543e5ba5 100644 (file)
@@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
        if (IS_ERR(i2c->clk))
                return PTR_ERR(i2c->clk);
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_prepare_enable(i2c->clk);
+       if (ret)
+               return ret;
 
-       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-                                &clk_freq)) {
+       ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                  &clk_freq);
+       if (ret) {
                dev_err(&pdev->dev, "clock-frequency not specified in DT");
-               return clk_freq;
+               goto err;
        }
 
        i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
        i2c->irq = platform_get_irq(pdev, 0);
        ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
                               dev_name(&pdev->dev), i2c);
-       if (ret) {
-               ret = -ENODEV;
+       if (ret)
                goto err;
-       }
 
        ret = i2c_add_adapter(&i2c->adap);
        if (ret < 0) {
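
Three probe() fixes in one file: the clk_prepare_enable() return value is checked instead of ignored; a missing "clock-frequency" property now takes the err path instead of returning clk_freq (which of_property_read_u32() had just failed to fill in, so the old return value was effectively uninitialized); and a devm_request_irq() failure propagates the real errno rather than clobbering it with -ENODEV. The resulting shape, assuming (as the pre-existing err label suggests) that the error path unwinds the clock:

    ret = clk_prepare_enable(i2c->clk);
    if (ret)
            return ret;                     /* nothing to unwind yet */

    ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
                               &clk_freq);
    if (ret) {
            dev_err(&pdev->dev, "clock-frequency not specified in DT");
            goto err;                       /* clock is enabled: unwind it */
    }
    /* ... */
    err:
            clk_disable_unprepare(i2c->clk); /* assumption: see note above */
            return ret;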
index dcca7076231eda0d458916963d6f61eaf7ebfacb..1c9cb65ac4cf8aeb83becaec867f9a34f28b5a1e 100644 (file)
@@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
        rc = i2c_add_adapter(adapter);
        if (rc) {
                dev_err(&pdev->dev, "Adapter registeration failed\n");
+               mbox_free_channel(ctx->mbox_chan);
                return rc;
        }
 
index 069a41f116ddcd0831d2adb65acac6709ee9e8c6..e6d4935161e4902762f6042847838428ec34faf2 100644 (file)
@@ -1012,6 +1012,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
  */
 void i2c_unregister_device(struct i2c_client *client)
 {
+       if (client->dev.of_node)
+               of_node_clear_flag(client->dev.of_node, OF_POPULATED);
        device_unregister(&client->dev);
 }
 EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1320,8 +1322,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
 
        dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
 
-       for_each_available_child_of_node(adap->dev.of_node, node)
+       for_each_available_child_of_node(adap->dev.of_node, node) {
+               if (of_node_test_and_set_flag(node, OF_POPULATED))
+                       continue;
                of_i2c_register_device(adap, node);
+       }
 }
 
 static int of_dev_node_match(struct device *dev, void *data)
@@ -1853,6 +1858,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
                if (adap == NULL)
                        return NOTIFY_OK;       /* not for us */
 
+               if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+                       put_device(&adap->dev);
+                       return NOTIFY_OK;
+               }
+
                client = of_i2c_register_device(adap, rd->dn);
                put_device(&adap->dev);
 
@@ -1863,6 +1873,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
                }
                break;
        case OF_RECONFIG_CHANGE_REMOVE:
+               /* already depopulated? */
+               if (!of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
                /* find our device by node */
                client = of_find_i2c_device_by_node(rd->dn);
                if (client == NULL)
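
The OF_POPULATED flag ties the i2c-core hunks together: of_i2c_register_devices() sets it when it creates a client for a node, the OF reconfig notifier declines to create a duplicate for a node whose flag is already set, and i2c_unregister_device() clears it so the node can be populated again later. of_node_test_and_set_flag() returns the previous state atomically, which is what yields the "exactly one creator" guarantee; a (deliberately non-atomic) stand-alone illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for of_node_test_and_set_flag(node, OF_POPULATED);
     * the kernel helper does this atomically */
    static bool test_and_set_populated(unsigned long *flags)
    {
            bool was_set = *flags & 1UL;

            *flags |= 1UL;
            return was_set;
    }

    int main(void)
    {
            unsigned long node_flags = 0;

            printf("%d\n", test_and_set_populated(&node_flags)); /* 0: we create */
            printf("%d\n", test_and_set_populated(&node_flags)); /* 1: duplicate */
            return 0;
    }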
index 62641f2adaf74d2a3e5ba2473ac00b9a8984cb28..5b5f403d8ce6a80a3c458e6537ee0baedf38073e 100644 (file)
@@ -771,7 +771,7 @@ static const struct attribute_group *elan_sysfs_groups[] = {
  */
 static void elan_report_contact(struct elan_tp_data *data,
                                int contact_num, bool contact_valid,
-                               bool hover_event, u8 *finger_data)
+                               u8 *finger_data)
 {
        struct input_dev *input = data->input;
        unsigned int pos_x, pos_y;
@@ -815,9 +815,7 @@ static void elan_report_contact(struct elan_tp_data *data,
                input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
                input_report_abs(input, ABS_MT_POSITION_X, pos_x);
                input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
-               input_report_abs(input, ABS_MT_DISTANCE, hover_event);
-               input_report_abs(input, ABS_MT_PRESSURE,
-                                hover_event ? 0 : scaled_pressure);
+               input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
                input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
                input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
                input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +837,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
        hover_event = hover_info & 0x40;
        for (i = 0; i < ETP_MAX_FINGERS; i++) {
                contact_valid = tp_info & (1U << (3 + i));
-               elan_report_contact(data, i, contact_valid, hover_event,
-                                   finger_data);
+               elan_report_contact(data, i, contact_valid, finger_data);
 
                if (contact_valid)
                        finger_data += ETP_FINGER_DATA_LEN;
        }
 
        input_report_key(input, BTN_LEFT, tp_info & 0x01);
+       input_report_abs(input, ABS_DISTANCE, hover_event != 0);
        input_mt_report_pointer_emulation(input, true);
        input_sync(input);
 }
@@ -922,6 +920,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
        input_abs_set_res(input, ABS_Y, data->y_res);
        input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
        input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+       input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 
        /* And MT parameters */
        input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +933,6 @@ static int elan_setup_input_device(struct elan_tp_data *data)
                             ETP_FINGER_WIDTH * max_width, 0, 0);
        input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
                             ETP_FINGER_WIDTH * min_width, 0, 0);
-       input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
 
        data->input = input;
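In the elan_i2c hunks, hover is read once per packet (hover_info), so reporting it per contact as ABS_MT_DISTANCE overstated what the hardware knows; the driver now declares a 0..1 ABS_DISTANCE axis at setup and emits one event per frame instead. A reduced sketch of the per-frame reporting (helper name hypothetical):

#include <linux/input.h>
#include <linux/input/mt.h>

/* Illustration only: hover is a frame-level property, reported once
 * after all MT slots for the frame have been filled in.
 */
static void foo_report_frame(struct input_dev *input, bool hover,
			     bool left_button)
{
	/* ... per-contact MT slot reports happen before this point ... */
	input_report_key(input, BTN_LEFT, left_button);
	input_report_abs(input, ABS_DISTANCE, hover ? 1 : 0);
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);
}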
 
index 35c8d0ceabeebf989b8eeff5cd54ee8f3ac2e247..3a32caf06bf1dae9b9fb678947bb9c7243fc25da 100644
@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
                                        ABS_MT_POSITION_Y);
                /* Image sensors can report per-contact pressure */
                input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
-               input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
+               input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
 
                /* Image sensors can signal 4 and 5 finger clicks */
                __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
index 8d7e1c8b6d566cb385c5cc92ff67c24f40692c9a..4dd88264dff55c0c95efd813c82fe762d7216263 100644
@@ -1055,7 +1055,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
 
        processor = (struct acpi_madt_generic_interrupt *)header;
 
-       if (BAD_MADT_ENTRY(processor, end))
+       if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;
 
        /*
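The switch from BAD_MADT_ENTRY() to BAD_MADT_GICC_ENTRY() reflects that the MADT GICC subtable length differs between ACPI revisions, so a strict comparison against the size of the current C struct rejects valid tables from firmware written to the older revision. A sketch of the idea; the concrete lengths below are illustrative assumptions, not taken from the patch:

#include <linux/acpi.h>

/* Illustration only: accept each spec-defined GICC subtable length
 * rather than insisting on sizeof() the newest structure layout.
 */
static bool foo_gicc_entry_ok(struct acpi_subtable_header *h,
			      unsigned long end)
{
	if ((unsigned long)h + h->length > end)
		return false;	/* entry runs past the table */

	switch (h->length) {
	case 76:	/* assumed older-revision length */
	case 80:	/* assumed newer-revision length */
		return true;
	default:
		return false;
	}
}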
index 4400edd1a6c729129357f5df0bdb37f7e8f018db..b7d54d428b5e55d1520d52e68b95202593cf4b53 100644
@@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void)
                return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
        }
 
-       /*
-        * Some cores claim the FDC is routable but it doesn't actually seem to
-        * be connected.
-        */
-       switch (current_cpu_type()) {
-       case CPU_INTERAPTIV:
-       case CPU_PROAPTIV:
-               return -1;
-       }
-
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
 }
index 8911e51d410ab3c6e8d14af25fb5c1ff7751b47d..3a27a84ad3ec376a2543c1ac9568c30e5d7c131b 100644
@@ -2074,14 +2074,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
                        ret = gpmc_probe_nand_child(pdev, child);
                else if (of_node_cmp(child->name, "onenand") == 0)
                        ret = gpmc_probe_onenand_child(pdev, child);
-               else if (of_node_cmp(child->name, "ethernet") == 0 ||
-                        of_node_cmp(child->name, "nor") == 0 ||
-                        of_node_cmp(child->name, "uart") == 0)
+               else
                        ret = gpmc_probe_generic_child(pdev, child);
-
-               if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
-                        __func__, child->full_name))
-                       of_node_put(child);
        }
 
        return 0;
index 0c77240ae2fce8cbb832388c20f294f49763e03e..729e0851167df9d441a5d26b34256029dd6db3c8 100644
@@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 
        afu = cxl_pci_to_afu(dev);
 
+       get_device(&afu->dev);
        ctx = cxl_context_alloc();
        if (IS_ERR(ctx))
                return ctx;
@@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
        rc = cxl_context_init(ctx, afu, false, NULL);
        if (rc) {
                kfree(ctx);
+               put_device(&afu->dev);
                return ERR_PTR(-ENOMEM);
        }
        cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx)
        if (ctx->status != CLOSED)
                return -EBUSY;
 
+       put_device(&ctx->afu->dev);
+
        cxl_context_free(ctx);
 
        return 0;
@@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
        }
 
        ctx->status = STARTED;
-       get_device(&ctx->afu->dev);
 out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
@@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
 /* Stop a context.  Returns 0 on success, otherwise -Errno */
 int cxl_stop_context(struct cxl_context *ctx)
 {
-       int rc;
-
-       rc = __detach_context(ctx);
-       if (!rc)
-               put_device(&ctx->afu->dev);
-       return rc;
+       return __detach_context(ctx);
 }
 EXPORT_SYMBOL_GPL(cxl_stop_context);
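The cxl api.c hunks rebalance the AFU device reference so it is taken when a context is created (cxl_dev_context_init) and dropped when the context is released, rather than being taken at start and dropped at stop; a context that is initialised but never started no longer leaves the refcount skewed. The generic lifetime pairing, sketched (foo_* names hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_ctx {
	struct device *parent;	/* pinned for the context's lifetime */
};

/* Illustration only: the reference lives exactly as long as the object
 * that depends on it, not as long as some intermediate state.
 */
static struct foo_ctx *foo_ctx_create(struct device *parent)
{
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->parent = get_device(parent);	/* take at create */
	return ctx;
}

static void foo_ctx_destroy(struct foo_ctx *ctx)
{
	put_device(ctx->parent);		/* drop at destroy */
	kfree(ctx);
}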
 
index 2a4c80ac322ad2500a13fc6162d8920ac4a6b8db..1287148629c0d6f0b6e6066d4c883d8dfaebf9da 100644
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                area = ctx->afu->psn_phys;
-               if (offset > ctx->afu->adapter->ps_size)
+               if (offset >= ctx->afu->adapter->ps_size)
                        return VM_FAULT_SIGBUS;
        } else {
                area = ctx->psn_phys;
-               if (offset > ctx->psn_size)
+               if (offset >= ctx->psn_size)
                        return VM_FAULT_SIGBUS;
        }
 
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+       u64 start = vma->vm_pgoff << PAGE_SHIFT;
        u64 len = vma->vm_end - vma->vm_start;
-       len = min(len, ctx->psn_size);
+
+       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+               if (start + len > ctx->afu->adapter->ps_size)
+                       return -EINVAL;
+       } else {
+               if (start + len > ctx->psn_size)
+                       return -EINVAL;
+       }
 
        if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
                /* make sure there is a valid per process space for this AFU */
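Two related bounds fixes in the cxl fault and mmap paths: the fault handler's comparison becomes >= because an offset equal to the region size is already one byte out of range, and cxl_context_iomap() now rejects a window whose start plus length exceeds the problem-space area instead of silently clamping the length and letting later faults hit SIGBUS. The mmap-time validation, sketched against a generic region size:

#include <linux/mm.h>

/* Illustration only: validate the whole requested window up front. */
static int foo_iomap_check(struct vm_area_struct *vma, u64 region_size)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (start + len > region_size)
		return -EINVAL;		/* window runs past the region */

	return 0;			/* safe to install the mapping */
}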
index 833348e2c9cbc161128d2666f0329e4b18ea536b..4a164ab8b35a65d864c1d83ebe72312624b15c4c 100644
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
                spin_lock(&adapter->afu_list_lock);
                for (slice = 0; slice < adapter->slices; slice++) {
                        afu = adapter->afu[slice];
-                       if (!afu->enabled)
+                       if (!afu || !afu->enabled)
                                continue;
                        rcu_read_lock();
                        idr_for_each_entry(&afu->contexts_idr, ctx, id)
index c68ef5806dbe122503742f57c6d1259a599b7cf7..32ad097059498c4d5b6f8d2f4a9bb671fbf872a4 100644
@@ -539,7 +539,7 @@ err:
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-       if (afu->p1n_mmio)
+       if (afu->p2n_mmio)
                iounmap(afu->p2n_mmio);
        if (afu->p1n_mmio)
                iounmap(afu->p1n_mmio);
index b1d1983a84a5d8c665bf0b66880e4f6994645d95..2eba002b580b4c9f2c07de079c7aa060c3b7cb6e 100644
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
        unsigned long addr;
 
        phb = pci_bus_to_host(bus);
-       afu = (struct cxl_afu *)phb->private_data;
        if (phb == NULL)
                return PCIBIOS_DEVICE_NOT_FOUND;
+       afu = (struct cxl_afu *)phb->private_data;
+
        if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
                return PCIBIOS_DEVICE_NOT_FOUND;
        if (offset >= (unsigned long)phb->cfg_data)
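The cxl vphb hunk is a textbook use-before-check: phb->private_data was read one line before phb was tested against NULL, so the test could never save anything. The rule the fix restores, in miniature:

#include <linux/errno.h>

struct foo { int value; };

/* Illustration only: test the pointer first, dereference second. */
static int foo_get_value(const struct foo *f)
{
	if (!f)
		return -ENODEV;	/* check ... */
	return f->value;	/* ... then use */
}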
index 357b6ae4d207d785176c22d19a2c572617b8220f..458aa5a09c522816febc6abef20d034a67cfea5d 100644
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
        schedule_work(&device->event_work);
 }
 
-void mei_cl_bus_remove_devices(struct mei_device *dev)
-{
-       struct mei_cl *cl, *next;
-
-       mutex_lock(&dev->device_lock);
-       list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
-               if (cl->device)
-                       mei_cl_remove_device(cl->device);
-
-               list_del(&cl->device_link);
-               mei_cl_unlink(cl);
-               kfree(cl);
-       }
-       mutex_unlock(&dev->device_lock);
-}
-
 int __init mei_cl_bus_init(void)
 {
        return bus_register(&mei_cl_bus_type);
index 94514b2c7a50277ba4adf8604ce6084b01309967..00c3865ca3b1d42042d78d5558eac547ea8c03a8 100644
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
 
        mei_nfc_host_exit(dev);
 
-       mei_cl_bus_remove_devices(dev);
-
        mutex_lock(&dev->device_lock);
 
        mei_wd_stop(dev);
index b983c4ecad3800d34e21a453e034ed15d84574f4..290ef3037437816114e6d373d4889c6ee2855c1b 100644
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
 
        cldev->priv_data = NULL;
 
-       mutex_lock(&dev->device_lock);
        /* Need to remove the device here
         * since mei_nfc_free will unlink the clients
         */
        mei_cl_remove_device(cldev);
+
+       mutex_lock(&dev->device_lock);
        mei_nfc_free(ndev);
        mutex_unlock(&dev->device_lock);
 }
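The mei hunks drop the duplicate device-removal pass (mei_cl_bus_remove_devices and its call in mei_stop) and, in mei_nfc_host_exit(), call mei_cl_remove_device() before taking device_lock rather than under it, presumably because removing the device re-enters paths that want the same lock, while only the mei_nfc_free() step actually needs it held. The reordering, generically (foo_* names hypothetical):

#include <linux/mutex.h>

struct foo_dev { struct mutex lock; };

static void foo_remove_child(struct foo_dev *dev)
{
	/* hypothetical: takes dev->lock internally */
}

static void foo_free_resources(struct foo_dev *dev)
{
	/* hypothetical: caller must hold dev->lock */
}

/* Illustration only: never hold a lock across a teardown step that
 * acquires the same lock internally.
 */
static void foo_exit(struct foo_dev *dev)
{
	foo_remove_child(dev);		/* lock NOT held here */

	mutex_lock(&dev->lock);
	foo_free_resources(dev);
	mutex_unlock(&dev->lock);
}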
index 19eb990d398c03affd47a3a50e01ad0571427aa8..317a49480475d2c87fb5a4aa56716844ce21005a 100644
@@ -689,40 +689,57 @@ out:
 
 }
 
-static bool bond_should_change_active(struct bonding *bond)
+static struct slave *bond_choose_primary_or_current(struct bonding *bond)
 {
        struct slave *prim = rtnl_dereference(bond->primary_slave);
        struct slave *curr = rtnl_dereference(bond->curr_active_slave);
 
-       if (!prim || !curr || curr->link != BOND_LINK_UP)
-               return true;
+       if (!prim || prim->link != BOND_LINK_UP) {
+               if (!curr || curr->link != BOND_LINK_UP)
+                       return NULL;
+               return curr;
+       }
+
        if (bond->force_primary) {
                bond->force_primary = false;
-               return true;
+               return prim;
+       }
+
+       if (!curr || curr->link != BOND_LINK_UP)
+               return prim;
+
+       /* At this point, prim and curr are both up */
+       switch (bond->params.primary_reselect) {
+       case BOND_PRI_RESELECT_ALWAYS:
+               return prim;
+       case BOND_PRI_RESELECT_BETTER:
+               if (prim->speed < curr->speed)
+                       return curr;
+               if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
+                       return curr;
+               return prim;
+       case BOND_PRI_RESELECT_FAILURE:
+               return curr;
+       default:
+               netdev_err(bond->dev, "impossible primary_reselect %d\n",
+                          bond->params.primary_reselect);
+               return curr;
        }
-       if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
-           (prim->speed < curr->speed ||
-            (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
-               return false;
-       if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
-               return false;
-       return true;
 }
 
 /**
- * find_best_interface - select the best available slave to be the active one
+ * bond_find_best_slave - select the best available slave to be the active one
  * @bond: our bonding struct
  */
 static struct slave *bond_find_best_slave(struct bonding *bond)
 {
-       struct slave *slave, *bestslave = NULL, *primary;
+       struct slave *slave, *bestslave = NULL;
        struct list_head *iter;
        int mintime = bond->params.updelay;
 
-       primary = rtnl_dereference(bond->primary_slave);
-       if (primary && primary->link == BOND_LINK_UP &&
-           bond_should_change_active(bond))
-               return primary;
+       slave = bond_choose_primary_or_current(bond);
+       if (slave)
+               return slave;
 
        bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP)
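The bonding hunk replaces the boolean bond_should_change_active() with a helper that returns the slave to activate, which lets bond_find_best_slave() handle every combination of primary/current link state and makes the three primary_reselect policies read directly off the switch. Condensed to its decision core (constants renamed here; the driver's real ones are BOND_PRI_RESELECT_*):

/* Illustration only: who wins once both primary and current are up. */
enum { RESELECT_ALWAYS, RESELECT_BETTER, RESELECT_FAILURE };

static int foo_prefer_primary(int policy, int prim_speed, int curr_speed,
			      int prim_duplex, int curr_duplex)
{
	switch (policy) {
	case RESELECT_ALWAYS:
		return 1;			/* primary always wins */
	case RESELECT_BETTER:
		if (prim_speed < curr_speed)
			return 0;
		if (prim_speed == curr_speed && prim_duplex <= curr_duplex)
			return 0;
		return 1;			/* only if strictly better */
	case RESELECT_FAILURE:
	default:
		return 0;			/* keep current until it fails */
	}
}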
index 041525d2595ced06ee4363bb09f5fe62dfe5b7ec..5d214d1353320856cf3de91d837b910df3f3de24 100644
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
        int err;
+       struct pinctrl *p;
 
        /* basic c_can configuration */
        err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       /* activate pins */
-       pinctrl_pm_select_default_state(dev->dev.parent);
+       /* Attempt to use "active" if available else use "default" */
+       p = pinctrl_get_select(priv->device, "active");
+       if (!IS_ERR(p))
+               pinctrl_put(p);
+       else
+               pinctrl_pm_select_default_state(priv->device);
+
        return 0;
 }
 
index e9b1810d319f0c22fe7760cde4742a879f7ac190..aede704605c66973c6bc3697a91c90f743990a01 100644
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
                struct can_frame *cf = (struct can_frame *)skb->data;
                u8 dlc = cf->can_dlc;
 
-               if (!(skb->tstamp.tv64))
-                       __net_timestamp(skb);
-
                netif_rx(priv->echo_skb[idx]);
                priv->echo_skb[idx] = NULL;
 
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
        if (unlikely(!skb))
                return NULL;
 
-       __net_timestamp(skb);
        skb->protocol = htons(ETH_P_CAN);
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
        memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
        if (unlikely(!skb))
                return NULL;
 
-       __net_timestamp(skb);
        skb->protocol = htons(ETH_P_CANFD);
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
        memset(*cfd, 0, sizeof(struct canfd_frame));
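Across the CAN hunks (dev.c here, slcan.c and vcan.c below), the skb timestamp is no longer used as the loopback-detection attribute, so the __net_timestamp() calls go away, and the skbcnt field in the CAN skb private area takes over that role; every site that constructs a CAN skb must therefore initialise it to zero. The producer-side pattern, reduced (foo_* name hypothetical):

#include <linux/can/skb.h>
#include <linux/netdevice.h>

/* Illustration only: every CAN skb producer must zero skbcnt. */
static struct sk_buff *foo_alloc_can_skb(struct net_device *dev)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
				    sizeof(struct can_frame));
	if (!skb)
		return NULL;

	can_skb_reserve(skb);			/* room for can_skb_priv */
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;		/* not yet looped back */
	return skb;
}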
index 7deb80dcbe8c09b669f15e3f5dfb3af3fc72598e..7bd54191f962a4ad3079eabdcefb0d001a14bf0b 100644
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
 
        err = clk_prepare_enable(priv->clk);
        if (err) {
-               netdev_err(ndev, "failed to enable periperal clock, error %d\n",
+               netdev_err(ndev,
+                          "failed to enable peripheral clock, error %d\n",
                           err);
                goto out;
        }
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
        napi_enable(&priv->napi);
        err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
        if (err) {
-               netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+               netdev_err(ndev, "request_irq(%d) failed, error %d\n",
+                          ndev->irq, err);
                goto out_close;
        }
        can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq) {
+       if (irq < 0) {
                dev_err(&pdev->dev, "No IRQ resource\n");
+               err = irq;
                goto fail;
        }
 
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
        priv->clk = devm_clk_get(&pdev->dev, "clkp1");
        if (IS_ERR(priv->clk)) {
                err = PTR_ERR(priv->clk);
-               dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
+               dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+                       err);
                goto fail_clk;
        }
 
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
        priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
        if (IS_ERR(priv->can_clk)) {
                err = PTR_ERR(priv->can_clk);
-               dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
+               dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
                goto fail_clk;
        }
 
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
        devm_can_led_init(ndev);
 
-       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+       dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
                 priv->regs, ndev->irq);
 
        return 0;
index f64f5290d6f8f41bd8a4ade0cf81be2811d98911..a23a7af8eb9a0ccfdffc39518ee9ee7cd43485d1 100644
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
        if (!skb)
                return;
 
-       __net_timestamp(skb);
        skb->dev = sl->dev;
        skb->protocol = htons(ETH_P_CAN);
        skb->pkt_type = PACKET_BROADCAST;
@@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        memcpy(skb_put(skb, sizeof(struct can_frame)),
               &cf, sizeof(struct can_frame));
index 0ce868de855dfeb28533def7b47142d671836e9b..674f367087c54ac168d2c0283b1361a1c98d3696 100644
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
        skb->dev       = dev;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       if (!(skb->tstamp.tv64))
-               __net_timestamp(skb);
-
        netif_rx_ni(skb);
 }
 
index 41095ebad97fe15e399d00c293882d542c844b25..2d1ce3c5d0dd34c9fabb1399a32c49be744afdd1 100644
@@ -2382,6 +2382,7 @@ boomerang_interrupt(int irq, void *dev_id)
        void __iomem *ioaddr;
        int status;
        int work_done = max_interrupt_work;
+       int handled = 0;
 
        ioaddr = vp->ioaddr;
 
@@ -2400,6 +2401,7 @@ boomerang_interrupt(int irq, void *dev_id)
 
        if ((status & IntLatch) == 0)
                goto handler_exit;              /* No interrupt: shared IRQs can cause this */
+       handled = 1;
 
        if (status == 0xffff) {         /* h/w no longer present (hotplug)? */
                if (vortex_debug > 1)
@@ -2501,7 +2503,7 @@ boomerang_interrupt(int irq, void *dev_id)
 handler_exit:
        vp->handling_irq = 0;
        spin_unlock(&vp->lock);
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(handled);
 }
 
 static int vortex_rx(struct net_device *dev)
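The 3c59x hunk makes boomerang_interrupt() honest on shared interrupt lines: when IntLatch shows the device did not raise the interrupt, the handler now returns IRQ_NONE (via IRQ_RETVAL) instead of unconditionally claiming IRQ_HANDLED, which lets the kernel's spurious-interrupt accounting work. The canonical shape (foo_* names hypothetical):

#include <linux/interrupt.h>

struct foo_priv { int dummy; };

static bool foo_irq_pending(struct foo_priv *priv)
{
	return false;	/* hypothetical status-register check */
}

/* Illustration only: claim the interrupt only if our device raised it. */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	int handled = 0;

	if (foo_irq_pending(priv)) {
		handled = 1;
		/* ... service the device ... */
	}

	return IRQ_RETVAL(handled);	/* IRQ_HANDLED or IRQ_NONE */
}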
index 661cdaa7ea96c26ebc6142c955f12dd19296e49f..b3bc87fe3764e397e4a9ce19518866cb97530771 100644
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
        get_page(pa->pages);
        bd->pa = *pa;
 
-       bd->dma = pa->pages_dma + pa->pages_offset;
+       bd->dma_base = pa->pages_dma;
+       bd->dma_off = pa->pages_offset;
        bd->dma_len = len;
 
        pa->pages_offset += len;
index 506e832c9e9a80f8e6da438ac4e2cb42a49435a7..a4473d8ff4fa0e1ec7bbdb511f9edd51f1871d71 100644
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
+       dma_addr_t hdr_dma, buf_dma;
 
        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
-       rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
-       rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
-       rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
-       rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+       hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+       buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+       rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+       rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+       rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+       rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
 
        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
index 1e9c28d19ef88ccd4e0c8b45bf7ee9351c03935c..aae9d5ecd1822b16a2812de3bee503f59113adaa 100644
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
        /* Start with the header buffer which may contain just the header
         * or the header plus data
         */
-       dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
-                               rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+       dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+                                     rdata->rx.hdr.dma_off,
+                                     rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
 
        packet = page_address(rdata->rx.hdr.pa.pages) +
                 rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
        len -= copy_len;
        if (len) {
                /* Add the remaining data as a frag */
-               dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
-                                       rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+               dma_sync_single_range_for_cpu(pdata->dev,
+                                             rdata->rx.buf.dma_base,
+                                             rdata->rx.buf.dma_off,
+                                             rdata->rx.buf.dma_len,
+                                             DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
                                if (!skb)
                                        error = 1;
                        } else if (rdesc_len) {
-                               dma_sync_single_for_cpu(pdata->dev,
-                                                       rdata->rx.buf.dma,
+                               dma_sync_single_range_for_cpu(pdata->dev,
+                                                       rdata->rx.buf.dma_base,
+                                                       rdata->rx.buf.dma_off,
                                                        rdata->rx.buf.dma_len,
                                                        DMA_FROM_DEVICE);
 
index 63d72a140053956c35b82b2982497db2fbf1e440..717ce21b60776b1e5162833a83aefa1fa95081b1 100644
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
        struct xgbe_page_alloc pa;
        struct xgbe_page_alloc pa_unmap;
 
-       dma_addr_t dma;
+       dma_addr_t dma_base;
+       unsigned long dma_off;
        unsigned int dma_len;
 };
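The xgbe hunks split the buffer's DMA address into dma_base plus dma_off because receive buffers are carved out of one larger mapped page allocation (pages_dma plus pages_offset above): dma_sync_single_for_cpu() is only valid on an address the mapping call returned, so syncing a sub-buffer has to go through dma_sync_single_range_for_cpu() with the original base and an explicit offset. Reduced (foo_* names hypothetical):

#include <linux/dma-mapping.h>

struct foo_buf {
	dma_addr_t dma_base;	/* as returned by the dma_map_*() call */
	unsigned long dma_off;	/* sub-buffer offset within that mapping */
	unsigned int dma_len;
};

/* Illustration only: sync a slice of a larger mapping correctly. */
static void foo_sync_for_cpu(struct device *dev, struct foo_buf *bd)
{
	dma_sync_single_range_for_cpu(dev, bd->dma_base, bd->dma_off,
				      bd->dma_len, DMA_FROM_DEVICE);
}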
 
index 909ad7a0d48088fcaa8295dfdd3b3617becf8d12..4566cdf0bc398e977b310729a3ab129a190bc9b1 100644
@@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        macaddr = of_get_mac_address(dn);
        if (!macaddr || !is_valid_ether_addr(macaddr)) {
                dev_warn(&pdev->dev, "using random Ethernet MAC\n");
-               random_ether_addr(dev->dev_addr);
+               eth_hw_addr_random(dev);
        } else {
                ether_addr_copy(dev->dev_addr, macaddr);
        }
index b43b2cb9b830bfc64c1586fb4058f06596671f6e..64c1e9db6b0b5420687c1c083cc20b8adb7de5bd 100644
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
                new_skb = skb_realloc_headroom(skb, sizeof(*status));
                dev_kfree_skb(skb);
                if (!new_skb) {
-                       dev->stats.tx_errors++;
                        dev->stats.tx_dropped++;
                        return NULL;
                }
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
-                       dev->stats.rx_errors++;
                        goto next;
                }
 
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
-                       dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                                dev->stats.rx_frame_errors++;
                        if (dma_flag & DMA_RX_LG)
                                dev->stats.rx_length_errors++;
-                       dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
index 484eb8c374890ff951bf270935e7f45a7db23d18..a11485fbb33f2b7bcd6c973324ea41601dbaf575 100644
@@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v)
                 * eventually have to put a format interpreter in here ...
                 */
                seq_printf(seq, "%10d  %15llu  %8s  %8s  ",
-                          e->seqno, e->timestamp,
+                          be32_to_cpu(e->seqno),
+                          be64_to_cpu(e->timestamp),
                           (e->level < ARRAY_SIZE(devlog_level_strings)
                            ? devlog_level_strings[e->level]
                            : "UNKNOWN"),
                           (e->facility < ARRAY_SIZE(devlog_facility_strings)
                            ? devlog_facility_strings[e->facility]
                            : "UNKNOWN"));
-               seq_printf(seq, e->fmt, e->params[0], e->params[1],
-                          e->params[2], e->params[3], e->params[4],
-                          e->params[5], e->params[6], e->params[7]);
+               seq_printf(seq, e->fmt,
+                          be32_to_cpu(e->params[0]),
+                          be32_to_cpu(e->params[1]),
+                          be32_to_cpu(e->params[2]),
+                          be32_to_cpu(e->params[3]),
+                          be32_to_cpu(e->params[4]),
+                          be32_to_cpu(e->params[5]),
+                          be32_to_cpu(e->params[6]),
+                          be32_to_cpu(e->params[7]));
        }
        return 0;
 }
@@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file)
                return ret;
        }
 
-       /* Translate log multi-byte integral elements into host native format
-        * and determine where the first entry in the log is.
+       /* Find the earliest (lowest Sequence Number) log entry in the
+        * circular Device Log.
         */
        for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
                struct fw_devlog_e *e = &dinfo->log[index];
-               int i;
                __u32 seqno;
 
                if (e->timestamp == 0)
                        continue;
 
-               e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
                seqno = be32_to_cpu(e->seqno);
-               for (i = 0; i < 8; i++)
-                       e->params[i] =
-                               (__force __be32)be32_to_cpu(e->params[i]);
-
                if (seqno < fseqno) {
                        fseqno = seqno;
                        dinfo->first = index;
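The cxgb4 debugfs hunks stop rewriting the firmware device log to host byte order in place at open time (note the removed __force casts) and keep the buffer in firmware byte order, converting each big-endian field with be32_to_cpu()/be64_to_cpu() only at the point of display. In miniature (foo_* names hypothetical):

#include <linux/seq_file.h>
#include <linux/types.h>

struct foo_log_entry {
	__be32 seqno;		/* firmware writes big-endian */
	__be64 timestamp;
};

/* Illustration only: convert on read; never rewrite the log buffer. */
static void foo_show_entry(struct seq_file *seq,
			   const struct foo_log_entry *e)
{
	seq_printf(seq, "%10u  %15llu\n",
		   be32_to_cpu(e->seqno),
		   (unsigned long long)be64_to_cpu(e->timestamp));
}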
index da2004e2a74176959ece42065a26267aa2924a2b..918a8e42139b1f8ba9d95a4a9a1300c014324db8 100644
@@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                                                 wq_work_done,
                                                 0 /* dont unmask intr */,
                                                 0 /* dont reset intr timer */);
-               return rq_work_done;
+               return budget;
        }
 
        if (budget > 0)
@@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                        0 /* don't reset intr timer */);
 
        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+       enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
 
        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }
-       enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
 
        return rq_work_done;
 }
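The enic hunk fixes two NAPI contract points: a poll that has not finished its work must return the full budget (not the partial rq_work_done) so the core keeps polling, and the ring's busy-poll lock is released before the early refill-failure return path rather than after it. The return-value contract in skeleton form (helpers hypothetical):

#include <linux/netdevice.h>

static int foo_process_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* hypothetical: rx descriptors consumed */
}

static void foo_unmask_irq(struct napi_struct *napi)
{
	/* hypothetical: re-enable the device interrupt */
}

/* Illustration only: the NAPI poll return-value contract. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_process_rx(napi, budget);

	if (work_done < budget) {
		napi_complete(napi);
		foo_unmask_irq(napi);
		return work_done;	/* done: report what was used */
	}

	return budget;			/* not done: stay scheduled */
}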
index 1f89c59b43535f9b65e946c7468cb1fcb13a2022..42e20e5385acb553b528ee8a6b275791e9ad1b44 100644
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/pm_runtime.h>
 #include <linux/ptrace.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 #define FEC_ENET_RAEM_V        0x8
 #define FEC_ENET_RAFL_V        0x8
 #define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
 
 static struct platform_device_id fec_devtype[] = {
        {
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
        struct fec_enet_private *fep = bus->priv;
+       struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
+       int ret = 0;
+
+       ret = pm_runtime_get_sync(dev);
+       if (IS_ERR_VALUE(ret))
+               return ret;
 
        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO read timeout\n");
-               return -ETIMEDOUT;
+               ret = -ETIMEDOUT;
+               goto out;
        }
 
-       /* return value */
-       return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+       ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       return ret;
 }
 
 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
 {
        struct fec_enet_private *fep = bus->priv;
+       struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
+       int ret = 0;
+
+       ret = pm_runtime_get_sync(dev);
+       if (IS_ERR_VALUE(ret))
+               return ret;
 
        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO write timeout\n");
-               return -ETIMEDOUT;
+               ret  = -ETIMEDOUT;
        }
 
-       return 0;
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       return ret;
 }
 
 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                ret = clk_prepare_enable(fep->clk_ahb);
                if (ret)
                        return ret;
-               ret = clk_prepare_enable(fep->clk_ipg);
-               if (ret)
-                       goto failed_clk_ipg;
                if (fep->clk_enet_out) {
                        ret = clk_prepare_enable(fep->clk_enet_out);
                        if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                }
        } else {
                clk_disable_unprepare(fep->clk_ahb);
-               clk_disable_unprepare(fep->clk_ipg);
                if (fep->clk_enet_out)
                        clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
 failed_clk_enet_out:
-               clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
                clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
+       ret = pm_runtime_get_sync(&fep->pdev->dev);
+       if (IS_ERR_VALUE(ret))
+               return ret;
+
        pinctrl_pm_select_default_state(&fep->pdev->dev);
        ret = fec_enet_clk_enable(ndev, true);
        if (ret)
-               return ret;
+               goto clk_enable;
 
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
        fec_enet_free_buffers(ndev);
 err_enet_alloc:
        fec_enet_clk_enable(ndev, false);
+clk_enable:
+       pm_runtime_mark_last_busy(&fep->pdev->dev);
+       pm_runtime_put_autosuspend(&fep->pdev->dev);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        return ret;
 }
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
 
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+       pm_runtime_mark_last_busy(&fep->pdev->dev);
+       pm_runtime_put_autosuspend(&fep->pdev->dev);
+
        fec_enet_free_buffers(ndev);
 
        return 0;
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_clk;
 
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                ret = regulator_enable(fep->reg_phy);
@@ -3434,6 +3465,8 @@ fec_probe(struct platform_device *pdev)
        netif_carrier_off(ndev);
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
 
        ret = register_netdev(ndev);
        if (ret)
@@ -3447,6 +3480,12 @@ fec_probe(struct platform_device *pdev)
 
        fep->rx_copybreak = COPYBREAK_DEFAULT;
        INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        return 0;
 
 failed_register:
@@ -3457,6 +3496,8 @@ failed_init:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
+       clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
 failed_phy:
@@ -3568,7 +3609,28 @@ failed_clk:
        return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       clk_disable_unprepare(fep->clk_ipg);
+
+       return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       return clk_prepare_enable(fep->clk_ipg);
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+       SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
 
 static struct platform_driver fec_driver = {
        .driver = {
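The fec hunks hand the clk_ipg clock (which the MDIO block needs) to runtime PM: probe enables it, marks the device active, and arms a 100 ms autosuspend; the MDIO read/write paths and open/close bracket their hardware access with pm_runtime_get_sync()/pm_runtime_put_autosuspend(); and the new runtime callbacks gate just that clock. (The patch tests the get_sync result with IS_ERR_VALUE(ret); the sketch below uses the more common ret < 0, since pm_runtime_get_sync() can legitimately return 1.) Skeleton, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define FOO_AUTOSUSPEND_MS	100	/* mirrors FEC_MDIO_PM_TIMEOUT */

/* Illustration only: an I/O path that may find the device suspended. */
static int foo_io_op(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resumes if needed */

	if (ret < 0)
		return ret;

	/* ... touch the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}

static void foo_probe_pm_setup(struct platform_device *pdev)
{
	pm_runtime_set_active(&pdev->dev);	/* clocks already on */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, FOO_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
}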
index 84764345546829a02bd1cf8650d16544c5fe968e..605cc8948594626783e093db71d474d638a26bc1 100644
@@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
        return resource_size(&efx->pci_dev->resource[bar]);
 }
 
+static bool efx_ef10_is_vf(struct efx_nic *efx)
+{
+       return efx->type->is_vf;
+}
+
 static int efx_ef10_get_pf_index(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
        return efx_ef10_probe(efx);
 }
 
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+                          unsigned int port_id, u8 *mac)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+       ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+                           sizeof(inbuf), NULL, 0, NULL);
+}
+
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+                          unsigned int port_id, u8 *mac)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+       ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+                           sizeof(inbuf), NULL, 0, NULL);
+}
+
 #ifdef CONFIG_SFC_SRIOV
 static int efx_ef10_probe_vf(struct efx_nic *efx)
 {
@@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
        WARN_ON(remove_failed);
 }
 
+static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u8 mac_old[ETH_ALEN];
+       int rc, rc2;
+
+       /* Only reconfigure a PF-created vport */
+       if (is_zero_ether_addr(nic_data->vport_mac))
+               return 0;
+
+       efx_device_detach_sync(efx);
+       efx_net_stop(efx->net_dev);
+       down_write(&efx->filter_sem);
+       efx_ef10_filter_table_remove(efx);
+       up_write(&efx->filter_sem);
+
+       rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+       if (rc)
+               goto restore_filters;
+
+       ether_addr_copy(mac_old, nic_data->vport_mac);
+       rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+                                   nic_data->vport_mac);
+       if (rc)
+               goto restore_vadaptor;
+
+       rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+                                   efx->net_dev->dev_addr);
+       if (!rc) {
+               ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
+       } else {
+               rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+               if (rc2) {
+                       /* Failed to add original MAC, so clear vport_mac */
+                       eth_zero_addr(nic_data->vport_mac);
+                       goto reset_nic;
+               }
+       }
+
+restore_vadaptor:
+       rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+       if (rc2)
+               goto reset_nic;
+restore_filters:
+       down_write(&efx->filter_sem);
+       rc2 = efx_ef10_filter_table_probe(efx);
+       up_write(&efx->filter_sem);
+       if (rc2)
+               goto reset_nic;
+
+       rc2 = efx_net_open(efx->net_dev);
+       if (rc2)
+               goto reset_nic;
+
+       netif_device_attach(efx->net_dev);
+
+       return rc;
+
+reset_nic:
+       netif_err(efx, drv, efx->net_dev,
+                 "Failed to restore when changing MAC address - scheduling reset\n");
+       efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
+
+       return rc ? rc : rc2;
+}
+
 static int efx_ef10_set_mac_address(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
                        efx->net_dev->dev_addr);
        MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
                       nic_data->vport_id);
-       rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
-                         sizeof(inbuf), NULL, 0, NULL);
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+                               sizeof(inbuf), NULL, 0, NULL);
 
        efx_ef10_filter_table_probe(efx);
        up_write(&efx->filter_sem);
@@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
                efx_net_open(efx->net_dev);
        netif_device_attach(efx->net_dev);
 
-#if !defined(CONFIG_SFC_SRIOV)
-       if (rc == -EPERM)
-               netif_err(efx, drv, efx->net_dev,
-                         "Cannot change MAC address; use sfboot to enable mac-spoofing"
-                         " on this interface\n");
-#else
-       if (rc == -EPERM) {
+#ifdef CONFIG_SFC_SRIOV
+       if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
                struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
 
-               /* Switch to PF and change MAC address on vport */
-               if (efx->pci_dev->is_virtfn && pci_dev_pf) {
-                       struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+               if (rc == -EPERM) {
+                       struct efx_nic *efx_pf;
 
-                       if (!efx_ef10_sriov_set_vf_mac(efx_pf,
-                                                      nic_data->vf_index,
-                                                      efx->net_dev->dev_addr))
-                               return 0;
-               }
-               netif_err(efx, drv, efx->net_dev,
-                         "Cannot change MAC address; use sfboot to enable mac-spoofing"
-                         " on this interface\n");
-       } else if (efx->pci_dev->is_virtfn) {
-               /* Successfully changed by VF (with MAC spoofing), so update the
-                * parent PF if possible.
-                */
-               struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+                       /* Switch to PF and change MAC address on vport */
+                       efx_pf = pci_get_drvdata(pci_dev_pf);
 
-               if (pci_dev_pf) {
+                       rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+                                                      nic_data->vf_index,
+                                                      efx->net_dev->dev_addr);
+               } else if (!rc) {
                        struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
                        struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
                        unsigned int i;
 
+                       /* MAC address successfully changed by VF (with MAC
+                        * spoofing) so update the parent PF if possible.
+                        */
                        for (i = 0; i < efx_pf->vf_count; ++i) {
                                struct ef10_vf *vf = nic_data->vf + i;
 
@@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
                                }
                        }
                }
-       }
+       } else
 #endif
+       if (rc == -EPERM) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Cannot change MAC address; use sfboot to enable"
+                         " mac-spoofing on this interface\n");
+       } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+               /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
+                * fall-back to the method of changing the MAC address on the
+                * vport.  This only applies to PFs because such versions of
+                * MCFW do not support VFs.
+                */
+               rc = efx_ef10_vport_set_mac_address(efx);
+       } else {
+               efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+                                      sizeof(inbuf), NULL, 0, rc);
+       }
+
        return rc;
 }
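The large ef10.c hunk makes efx_ef10_set_mac_address() degrade gracefully: the MCDI call is issued with the _quiet variant so expected failures are not logged twice, -EPERM on a VF is retried through the parent PF via efx_ef10_sriov_set_vf_mac(), and -ENOSYS from firmware that predates MC_CMD_VADAPTOR_SET_MAC falls back to the new efx_ef10_vport_set_mac_address(), which tears the vport down, swaps the MAC, and restores the old address (or schedules a reset) if anything fails. The dispatch, condensed with hypothetical wrappers (declarations only, not real driver APIs):

#include <linux/errno.h>

struct foo_nic;
static int foo_mcdi_set_mac_quiet(struct foo_nic *nic);
static int foo_set_mac_via_parent_pf(struct foo_nic *nic);
static int foo_set_mac_via_vport(struct foo_nic *nic);
static bool foo_is_vf(struct foo_nic *nic);
static void foo_report_mcdi_error(struct foo_nic *nic, int rc);

/* Illustration only: try the modern firmware call, then fall back. */
static int foo_set_mac(struct foo_nic *nic)
{
	int rc = foo_mcdi_set_mac_quiet(nic);

	if (rc == -EPERM && foo_is_vf(nic))
		return foo_set_mac_via_parent_pf(nic);
	if (rc == -ENOSYS && !foo_is_vf(nic))
		return foo_set_mac_via_vport(nic);	/* old firmware */
	if (rc)
		foo_report_mcdi_error(nic, rc);
	return rc;
}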
 
index 6c9b6e45509a18706ef9488e097f83f9f4a41ecf..3c17f274e80200ca8bb08b5a0db62aa34a943af9 100644
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
                            NULL, 0, NULL);
 }
 
-static int efx_ef10_vport_add_mac(struct efx_nic *efx,
-                                 unsigned int port_id, u8 *mac)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
-
-       MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
-       ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
-
-       return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
-                           sizeof(inbuf), NULL, 0, NULL);
-}
-
-static int efx_ef10_vport_del_mac(struct efx_nic *efx,
-                                 unsigned int port_id, u8 *mac)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
-
-       MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
-       ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
-
-       return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
-                           sizeof(inbuf), NULL, 0, NULL);
-}
-
 static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
                                  unsigned int vswitch_type)
 {
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
                            NULL, 0, NULL);
 }
 
-static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
-
-       MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
-       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
-                           NULL, 0, NULL);
-}
-
-static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
-
-       MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
-       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
-                           NULL, 0, NULL);
-}
-
 static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
                                  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
                                  vf->vlan, &vf->vport_id);
        if (rc)
-               goto reset_nic;
+               goto reset_nic_up_write;
 
 restore_mac:
        if (!is_zero_ether_addr(vf->mac)) {
                rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
                if (rc2) {
                        eth_zero_addr(vf->mac);
-                       goto reset_nic;
+                       goto reset_nic_up_write;
                }
        }
 
 restore_evb_port:
        rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
        if (rc2)
-               goto reset_nic;
+               goto reset_nic_up_write;
        else
                vf->vport_assigned = 1;
 
@@ -662,14 +620,16 @@ restore_vadaptor:
        if (vf->efx) {
                rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
                if (rc2)
-                       goto reset_nic;
+                       goto reset_nic_up_write;
        }
 
 restore_filters:
        if (vf->efx) {
                rc2 = vf->efx->type->filter_table_probe(vf->efx);
                if (rc2)
-                       goto reset_nic;
+                       goto reset_nic_up_write;
+
+               up_write(&vf->efx->filter_sem);
 
                up_write(&vf->efx->filter_sem);
 
@@ -681,9 +641,12 @@ restore_filters:
        }
        return rc;
 
+reset_nic_up_write:
+       if (vf->efx)
+               up_write(&vf->efx->filter_sem);
+
 reset_nic:
        if (vf->efx) {
-               up_write(&vf->efx->filter_sem);
                netif_err(efx, drv, efx->net_dev,
                          "Failed to restore VF - scheduling reset.\n");
                efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
index db4ef537c6106c2627eaec7f9a9a6a45043ad4ef..6d25b92cb45e9ed1884ef806998db9523ebb452a 100644
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
 int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
 void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
 void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+                          unsigned int port_id, u8 *mac);
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+                          unsigned int port_id, u8 *mac);
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
 
 #endif /* EF10_SRIOV_H */
index 804b9ad553d3841b076d4f3e8ee6ba627396226f..03bc03b67f08b3224f6e0d13b1fc0c189780c1af 100644
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
  */
 static int efx_process_channel(struct efx_channel *channel, int budget)
 {
+       struct efx_tx_queue *tx_queue;
        int spent;
 
        if (unlikely(!channel->enabled))
                return 0;
 
+       efx_for_each_channel_tx_queue(tx_queue, channel) {
+               tx_queue->pkts_compl = 0;
+               tx_queue->bytes_compl = 0;
+       }
+
        spent = efx_nic_process_eventq(channel, budget);
        if (spent && efx_channel_has_rx_queue(channel)) {
                struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
                efx_fast_push_rx_descriptors(rx_queue, true);
        }
 
+       /* Update BQL */
+       efx_for_each_channel_tx_queue(tx_queue, channel) {
+               if (tx_queue->bytes_compl) {
+                       netdev_tx_completed_queue(tx_queue->core_txq,
+                               tx_queue->pkts_compl, tx_queue->bytes_compl);
+               }
+       }
+
        return spent;
 }
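The efx.c/net_driver.h/tx.c hunks batch the Byte Queue Limits (BQL) bookkeeping: per-queue pkts_compl/bytes_compl counters are zeroed at the top of each event-queue pass, accumulated by efx_xmit_done(), and flushed to netdev_tx_completed_queue() once per poll instead of once per completion event. The shape of that batching (foo_* names hypothetical):

#include <linux/netdevice.h>

struct foo_txq {
	struct netdev_queue *core_txq;
	unsigned int pkts_compl;
	unsigned int bytes_compl;
};

/* Illustration only: accumulate per poll, report to BQL once. */
static void foo_poll_tx(struct foo_txq *txq)
{
	txq->pkts_compl = 0;
	txq->bytes_compl = 0;

	/* ... event processing adds completions to the counters ... */

	if (txq->bytes_compl)
		netdev_tx_completed_queue(txq->core_txq,
					  txq->pkts_compl,
					  txq->bytes_compl);
}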
 
index d72f522bf9c3b2c0d9df322a7798ec1be6a1aba8..47d1e3a96522668a1cf1c80cacd141d2afbf1193 100644
@@ -241,6 +241,8 @@ struct efx_tx_queue {
        unsigned int read_count ____cacheline_aligned_in_smp;
        unsigned int old_write_count;
        unsigned int merge_events;
+       unsigned int bytes_compl;
+       unsigned int pkts_compl;
 
        /* Members used only on the xmit path */
        unsigned int insert_count ____cacheline_aligned_in_smp;
index aaf2987512b5db7472bdef518c4aa4c510612ef7..1833a01465711d56d1b2e09b03090792929d3a1b 100644
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
-       netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+       tx_queue->pkts_compl += pkts_compl;
+       tx_queue->bytes_compl += bytes_compl;
 
        if (pkts_compl > 1)
                ++tx_queue->merge_events;
index 462820514faea05beaa77f5a7e94ede318c294c6..f335bf119ab57d3e209a81e28cc7cbce5f9208c7 100644
@@ -138,19 +138,6 @@ do {                                                               \
 #define CPSW_CMINTMAX_INTVL    (1000 / CPSW_CMINTMIN_CNT)
 #define CPSW_CMINTMIN_INTVL    ((1000 / CPSW_CMINTMAX_CNT) + 1)
 
-#define cpsw_enable_irq(priv)  \
-       do {                    \
-               u32 i;          \
-               for (i = 0; i < priv->num_irqs; i++) \
-                       enable_irq(priv->irqs_table[i]); \
-       } while (0)
-#define cpsw_disable_irq(priv) \
-       do {                    \
-               u32 i;          \
-               for (i = 0; i < priv->num_irqs; i++) \
-                       disable_irq_nosync(priv->irqs_table[i]); \
-       } while (0)
-
 #define cpsw_slave_index(priv)                         \
                ((priv->data.dual_emac) ? priv->emac_port :     \
                priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
                                (func)(slave++, ##arg);                 \
        } while (0)
 #define cpsw_get_slave_ndev(priv, __slave_no__)                                \
-       (priv->slaves[__slave_no__].ndev)
+       ((__slave_no__ < priv->data.slaves) ?                           \
+               priv->slaves[__slave_no__].ndev : NULL)
 #define cpsw_get_slave_priv(priv, __slave_no__)                                \
-       ((priv->slaves[__slave_no__].ndev) ?                            \
+       (((__slave_no__ < priv->data.slaves) &&                         \
+               (priv->slaves[__slave_no__].ndev)) ?                    \
                netdev_priv(priv->slaves[__slave_no__].ndev) : NULL)    \
 
 #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb)                \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
-               cpsw_disable_irq(priv);
+               disable_irq_nosync(priv->irqs_table[0]);
                priv->irq_enabled = false;
        }
 
@@ -817,7 +806,7 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
                prim_cpsw = cpsw_get_slave_priv(priv, 0);
                if (prim_cpsw->irq_enabled == false) {
                        prim_cpsw->irq_enabled = true;
-                       cpsw_enable_irq(priv);
+                       enable_irq(priv->irqs_table[0]);
                }
        }
 
@@ -1333,7 +1322,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
        if (prim_cpsw->irq_enabled == false) {
                if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
                        prim_cpsw->irq_enabled = true;
-                       cpsw_enable_irq(prim_cpsw);
+                       enable_irq(prim_cpsw->irqs_table[0]);
                }
        }
 
index 4208dd7ef10118aff0fa4bf02cd47e05481c25f9..d95f9aae95e78ecc08b8a91bb67c267d10f81461 100644 (file)
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
        /* Map device registers */
        ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
-       if (!lp->regs) {
+       if (IS_ERR(lp->regs)) {
                dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(lp->regs);
                goto free_netdev;
        }
 
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
                goto free_netdev;
        }
        lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
-       if (!lp->dma_regs) {
+       if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(lp->dma_regs);
                goto free_netdev;
        }
        lp->rx_irq = irq_of_parse_and_map(np, 1);
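
Both axienet fixes correct the same mistake: devm_ioremap_resource() never returns NULL; on failure it returns an encoded error pointer, so the old `!lp->regs` tests could never fire and failures were misreported as -ENOMEM. The canonical idiom, sketched for a hypothetical probe function:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *regs;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            regs = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(regs))
                    return PTR_ERR(regs); /* -EBUSY, -EINVAL, -ENOMEM, ... */

            /* ... */
            return 0;
    }
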
index 7856b6ccf5c547e5c4eaf790d111f40527c375fa..d95a50ae996dd4d57f78e67b51a7a3e79f8caaf3 100644 (file)
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
        memcpy(dev->dev_addr,  &ax25_defaddr, AX25_ADDR_LEN);
 
        dev->flags      = 0;
+       dev->features   = NETIF_F_LLTX; /* Allow recursion */
 
 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
        dev->header_ops      = &ax25_header_ops;
index f8370808a018f77503de49503985f3d5c55985f6..3b933bb5a8d5084208c2a9903ab44317cae5d146 100644 (file)
@@ -1355,6 +1355,7 @@ static void macvtap_exit(void)
        class_unregister(macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+       idr_destroy(&minor_idr);
 }
 module_exit(macvtap_exit);
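
The macvtap hunk pairs the driver's DEFINE_IDR()-declared minor allocator with an idr_destroy() on module exit, releasing the IDR's internally cached nodes. The general pairing, sketched for a hypothetical module:

    #include <linux/idr.h>
    #include <linux/module.h>

    static DEFINE_IDR(minor_idr); /* statically initialized, no idr_init() needed */

    static int __init my_init(void)
    {
            return 0;
    }

    static void __exit my_exit(void)
    {
            idr_destroy(&minor_idr); /* release the IDR's cached internal nodes */
    }

    module_init(my_init);
    module_exit(my_exit);
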
 
index cf18940f4e84f5352a6dffe7a72548d72ef3a011..cb86d7a0154228f5c3711898072a0e5f5d4d6a81 100644 (file)
@@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO
 
 config MDIO_BUS_MUX_MMIOREG
        tristate "Support for MMIO device-controlled MDIO bus multiplexers"
-       depends on OF_MDIO
+       depends on OF_MDIO && HAS_IOMEM
        select MDIO_BUS_MUX
        help
          This module provides a driver for MDIO bus multiplexers that
index 4545e78840b0d9dc80ae4392b36cfcf588f17c3a..35a2bffe848ad150a3c6bf0e1d66365ae605ac77 100644 (file)
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
 #define REALTEK_VENDOR_ID      0x0bda
 #define SAMSUNG_VENDOR_ID      0x04e8
 #define LENOVO_VENDOR_ID       0x17ef
+#define NVIDIA_VENDOR_ID       0x0955
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index e4b7a47a825c7f686e48992b23d3f1ee30555d71..efc18e05af0aed216144e91b7a835342a1b30a59 100644 (file)
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
        if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
                goto err;
 
-       ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+       ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
        if (ret)
                goto err;
 
index 8067b8fbb0eea42b106cc56ffe6bc216ae01f85a..db40175b1a0b081dfcf5c8eb829d935618324d10 100644 (file)
@@ -6,7 +6,7 @@
  * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
  *
  * USB Host Driver for Network Control Model (NCM)
- * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
  *
  * The NCM encoding, decoding and initialization logic
  * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
                ctx->tx_curr_skb = NULL;
        }
 
+       kfree(ctx->delayed_ndp16);
+
        kfree(ctx);
 }
 
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
 {
        const struct usb_cdc_union_desc *union_desc = NULL;
        struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
        /* finish setting up the device specific data */
        cdc_ncm_setup(dev);
 
+       /* Device-specific flags */
+       ctx->drvflags = drvflags;
+
+       /* Allocate the delayed NDP if needed. */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+               ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+               if (!ctx->delayed_ndp16)
+                       goto error2;
+               dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
+       }
+
        /* override ethtool_ops */
        dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
 
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
                return -ENODEV;
 
-       /* The NCM data altsetting is fixed */
-       ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
+       /* The NCM data altsetting is fixed, so we hard-code it here.
+        * Additionally, generic NCM devices are assumed to accept an
+        * arbitrarily placed NDP.
+        */
+       ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
 
        /*
         * We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
        struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
        size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
 
+       /* If the NDP should be moved to the end of the NCM package, we can't
+        * follow the NTH16 header as we normally would. The NDP isn't written
+        * to the SKB yet, so wNdpIndex is not yet accurate; it is fixed up later.
+       */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+               if (ctx->delayed_ndp16->dwSignature == sign)
+                       return ctx->delayed_ndp16;
+
        /* follow the chain of NDPs, looking for a match */
        while (ndpoffset) {
                ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
        }
 
        /* align new NDP */
-       cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+       if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+               cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
 
        /* verify that there is room for the NDP and the datagram (reserve) */
        if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
                nth16->wNdpIndex = cpu_to_le16(skb->len);
 
        /* push a new empty NDP */
-       ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+       if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+               ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+       else
+               ndp16 = ctx->delayed_ndp16;
+
        ndp16->dwSignature = sign;
        ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
        return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        struct sk_buff *skb_out;
        u16 n = 0, index, ndplen;
        u8 ready2send = 0;
+       u32 delayed_ndp_size;
+
+       /* If the NDP is written into the frame by cdc_ncm_ndp(), skb_out->len
+        * accounts for it; otherwise we must reserve room for it here.
+        */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+               delayed_ndp_size = ctx->max_ndp_size;
+       else
+               delayed_ndp_size = 0;
 
        /* if there is a remaining skb, it gets priority */
        if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                cdc_ncm_align_tail(skb_out,  ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
 
                /* check if we had enough room left for both NDP and frame */
-               if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
+               if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
                        if (n == 0) {
                                /* won't fit, MTU problem? */
                                dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                /* variables will be reset at next call */
        }
 
+       /* If requested, put NDP at end of frame. */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+               nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+               cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+               nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+               memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
+
+               /* Zero out delayed NDP - signature checking will naturally fail. */
+               ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+       }
+
        /* If collected data size is less or equal ctx->min_tx_pkt
         * bytes, we send buffers as it is. If we get more data, it
         * would be more efficient for USB HS mobile device with DMA
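
Taken together, the cdc_ncm hunks implement the new CDC_NCM_FLAG_NDP_TO_END behaviour: the NDP16 is staged in the preallocated ctx->delayed_ndp16 buffer instead of being written into the frame as datagrams accumulate, its size is reserved when checking against tx_max, and it is copied to the tail of the frame only when the frame is finalized, at which point the NTH16's wNdpIndex is patched to point at it. Roughly, assuming a single NDP per frame:

    Default layout:     | NTH16 | NDP16 | datagram 1 | ... | datagram N |
    NDP_TO_END layout:  | NTH16 | datagram 1 | ... | datagram N | NDP16 |

In both cases wNdpIndex points at the NDP16; with NDP_TO_END its final value is only known once the last datagram is in, which is why the NDP must be buffered on the side until then.
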
index 735f7dadb9a0740dea86ed58d55df1ae58edf9ef..2680a65cd5e4fde5e333ef1ca1ff5d5ad4091222 100644 (file)
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
        struct usb_driver *subdriver = ERR_PTR(-ENODEV);
        int ret = -ENODEV;
        struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+       int drvflags = 0;
 
        /* altsetting should always be 1 for NCM devices - so we hard-coded
-        * it here
+        * it here. Some Huawei devices need the NDP part of the NCM
+        * package to be at the end of the frame.
         */
-       ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+       drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+       ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
        if (ret)
                goto err;
 
index aafa1a1898e43de0d3d06e7d8367751473f25142..7f6419ebb5e1cd8c7fc5abbe10170a4ad6d92ea8 100644 (file)
@@ -494,6 +494,7 @@ enum rtl8152_flags {
 #define VENDOR_ID_REALTEK              0x0bda
 #define VENDOR_ID_SAMSUNG              0x04e8
 #define VENDOR_ID_LENOVO               0x17ef
+#define VENDOR_ID_NVIDIA               0x0955
 
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
@@ -4117,6 +4118,7 @@ static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
        {}
 };
 
index da11bb5e9c7fb82edd383384fe2fd392b7a1c59d..46f4caddccbed84a56b12aad93a683618401fbdb 100644 (file)
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
        static const u32 rxprod_reg[2] = {
                VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
        };
-       u32 num_rxd = 0;
+       u32 num_pkts = 0;
        bool skip_page_frags = false;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;
                struct vmxnet3_cmd_ring *ring = NULL;
-               if (num_rxd >= quota) {
+               if (num_pkts >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
                         */
                        break;
                }
-               num_rxd++;
                BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
                idx = rcd->rxdIdx;
                ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
                                napi_gro_receive(&rq->napi, skb);
 
                        ctx->skb = NULL;
+                       num_pkts++;
                }
 
 rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
                                  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
        }
 
-       return num_rxd;
+       return num_pkts;
 }
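
The rename from num_rxd to num_pkts reflects the substance of the vmxnet3 fix: the NAPI quota is now charged once per completed packet (when the skb is handed to napi_gro_receive()) rather than once per descriptor, so packets spanning several descriptors no longer exhaust the budget early. A sketch of the corrected accounting, with hypothetical helpers:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct my_rx_queue {
            struct napi_struct napi;
            /* ... ring state ... */
    };

    /* Hypothetical helpers; a packet may span several descriptors. */
    bool my_rx_desc_ready(struct my_rx_queue *rq);
    struct sk_buff *my_rx_consume_desc(struct my_rx_queue *rq);

    static int my_rx_poll(struct my_rx_queue *rq, int budget)
    {
            int num_pkts = 0;

            while (num_pkts < budget && my_rx_desc_ready(rq)) {
                    /* Returns NULL until the EOP descriptor completes a packet. */
                    struct sk_buff *skb = my_rx_consume_desc(rq);

                    if (skb) {
                            napi_gro_receive(&rq->napi, skb);
                            num_pkts++; /* charge the budget per packet */
                    }
            }
            return num_pkts; /* packets delivered, not descriptors consumed */
    }
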
 
 
index feacc3b994b7e621570ab93a6c142eb7aa5e9547..2f0bd6955f3398d65a4a71177fa8bbd56b446825 100644 (file)
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
  *     @dev: The network device to attach
  *     @c: The Z8530 channel to configure in sync DMA mode.
  *
- *     Set up a Z85x30 device for synchronous DMA tranmission. One
+ *     Set up a Z85x30 device for synchronous DMA transmission. One
  *     ISA DMA channel must be available for this to work. The receive
  *     side is run in PIO mode, but then it has the bigger FIFO.
  */
index 8eb22c0ca7ce40ea030bd7be11bf8f0f0c10c10a..7e2c43f701bc451463c5273ed3a5a1e49f6be15f 100644 (file)
@@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                                        __func__, dimm_name, cmd_name, i);
                        return -ENXIO;
                }
-               if (!access_ok(VERIFY_READ, p + in_len, in_size))
-                       return -EFAULT;
                if (in_len < sizeof(in_env))
                        copy = min_t(u32, sizeof(in_env) - in_len, in_size);
                else
@@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                                        __func__, dimm_name, cmd_name, i);
                        return -EFAULT;
                }
-               if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
-                       return -EFAULT;
                if (out_len < sizeof(out_env))
                        copy = min_t(u32, sizeof(out_env) - out_len, out_size);
                else
@@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
        }
 
        buf_len = out_len + in_len;
-       if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
-               return -EFAULT;
-
        if (buf_len > ND_IOCTL_MAX_BUFLEN) {
                dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
                                dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@ int __init nvdimm_bus_init(void)
        nvdimm_major = rc;
 
        nd_class = class_create(THIS_MODULE, "nd");
-       if (IS_ERR(nd_class))
+       if (IS_ERR(nd_class)) {
+               rc = PTR_ERR(nd_class);
                goto err_class;
+       }
 
        return 0;
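
class_create() returns an encoded error pointer on failure, so the hunk above not only tests with IS_ERR() but, crucially, propagates PTR_ERR() instead of falling through with a stale rc. The idiom, sketched with illustrative names:

    static struct class *my_class;

    static int __init my_bus_init(void)
    {
            int rc;

            my_class = class_create(THIS_MODULE, "mydev");
            if (IS_ERR(my_class)) {
                    rc = PTR_ERR(my_class); /* e.g. -ENOMEM */
                    goto err_class;
            }
            return 0;

    err_class:
            /* unwind earlier registrations here */
            return rc;
    }
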
 
index 515f33882ab89ae27ea4d7006e818fa8b15fffb3..49c1720df59a8550cdca31735e6ef519f6b88266 100644 (file)
@@ -7,7 +7,6 @@
  *     Bjorn Helgaas <bjorn.helgaas@hp.com>
  */
 
-#include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
        {"", 0}
 };
 
-#ifdef CONFIG_ACPI
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
-       u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
-       return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
-}
-#else
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
-       struct resource *res;
-
-       res = io ? request_region(start, length, desc) :
-               request_mem_region(start, length, desc);
-       if (res) {
-               res->flags &= ~IORESOURCE_BUSY;
-               return true;
-       }
-       return false;
-}
-#endif
-
 static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
 {
        char *regionid;
        const char *pnpid = dev_name(&dev->dev);
        resource_size_t start = r->start, end = r->end;
-       bool reserved;
+       struct resource *res;
 
        regionid = kmalloc(16, GFP_KERNEL);
        if (!regionid)
                return;
 
        snprintf(regionid, 16, "pnp %s", pnpid);
-       reserved = __reserve_range(start, end - start + 1, !!port, regionid);
-       if (!reserved)
+       if (port)
+               res = request_region(start, end - start + 1, regionid);
+       else
+               res = request_mem_region(start, end - start + 1, regionid);
+       if (res)
+               res->flags &= ~IORESOURCE_BUSY;
+       else
                kfree(regionid);
 
        /*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
         * have double reservations.
         */
        dev_info(&dev->dev, "%pR %s reserved\n", r,
-                reserved ? "has been" : "could not be");
+                res ? "has been" : "could not be");
 }
 
 static void reserve_resources_of_dev(struct pnp_dev *dev)
index 86621fabbb8b3105a9b8e404e6c18b2f3830347d..735355b0e0233529ca8ed1fbbf4df40dec867bb9 100644 (file)
@@ -121,6 +121,7 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
 #define REG_3          0x0004a0
 #define REG_4          0x000600
 #define REG_6          0x000800
+#define REG_7          0x000804
 #define REG_8          0x000820
 #define REG_9          0x000a04
 #define REG_10         0x018000
@@ -135,6 +136,8 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
 #define REG_21         0x200218
 #define REG_22         0x0005a0
 #define REG_23         0x0005c0
+#define REG_24         0x000808
+#define REG_25         0x000b00
 #define REG_26         0x200118
 #define REG_27         0x200308
 #define REG_32         0x21003c
@@ -429,6 +432,9 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
 #define SET_LENXY_START_RECFILL(fb, lenxy) \
        WRITE_WORD(lenxy, fb, REG_9)
 
+#define SETUP_COPYAREA(fb) \
+       WRITE_BYTE(0, fb, REG_16b1)
+
 static void
 HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
 {
@@ -1004,6 +1010,36 @@ stifb_blank(int blank_mode, struct fb_info *info)
        return 0;
 }
 
+static void
+stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+       struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+       SETUP_COPYAREA(fb);
+
+       SETUP_HW(fb);
+       if (fb->info.var.bits_per_pixel == 32) {
+               WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+               NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+       } else {
+               WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+               NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+       }
+
+       NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
+               IBOvals(RopSrc, MaskAddrOffset(0),
+               BitmapExtent08, StaticReg(1),
+               DataDynamic, MaskOtc, BGx(0), FGx(0)));
+
+       WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
+       WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
+       WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
+
+       SETUP_FB(fb);
+}
+
 static void __init
 stifb_init_display(struct stifb_info *fb)
 {
@@ -1069,7 +1105,7 @@ static struct fb_ops stifb_ops = {
        .fb_setcolreg   = stifb_setcolreg,
        .fb_blank       = stifb_blank,
        .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
+       .fb_copyarea    = stifb_copyarea,
        .fb_imageblit   = cfb_imageblit,
 };
 
@@ -1258,7 +1294,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
        info->fbops = &stifb_ops;
        info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
        info->screen_size = fix->smem_len;
-       info->flags = FBINFO_DEFAULT;
+       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
        info->pseudo_palette = &fb->pseudo_palette;
 
        /* This has to be done !!! */
index 510040b04c964dbdeab4ab47c29b789fef5c4b5f..b1dc5188804831af2d58478b700b424e5d78d895 100644 (file)
@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
        unlock_new_inode(inode);
        return inode;
 error:
-       unlock_new_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        return ERR_PTR(retval);
 
 }
index 09e4433717b8795c2c7c8c76452887ec27a91be4..e8aa57dc8d6dff5a7a46899d2ba94a4ec0b715b1 100644 (file)
@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
        unlock_new_inode(inode);
        return inode;
 error:
-       unlock_new_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        return ERR_PTR(retval);
 
 }
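
Both v9fs hunks replace the open-coded unlock_new_inode() + iput() pair with iget_failed(), the helper designed for aborting an inode obtained from iget_locked()/iget5_locked(): it marks the inode bad, unlocks it, and drops the reference, waking any waiters in one step. The canonical shape, sketched for a hypothetical filesystem:

    /* Hypothetical fill-from-disk helper. */
    int myfs_read_inode(struct inode *inode);

    struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
    {
            struct inode *inode;
            int err;

            inode = iget_locked(sb, ino);
            if (!inode)
                    return ERR_PTR(-ENOMEM);
            if (!(inode->i_state & I_NEW))
                    return inode; /* cached and fully initialized */

            err = myfs_read_inode(inode);
            if (err) {
                    iget_failed(inode); /* marks bad, unlocks, drops the ref */
                    return ERR_PTR(err);
            }
            unlock_new_inode(inode);
            return inode;
    }
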
index 0ef5cc13fae26f8899cad7ed30b59344c79120d4..81220b2203c61f1033bc5a4a0666f9b7b96f08df 100644 (file)
@@ -44,6 +44,8 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST           9
 #define BTRFS_INODE_READDIO_NEED_LOCK          10
 #define BTRFS_INODE_HAS_PROPS                  11
+/* DIO is ready to submit */
+#define BTRFS_INODE_DIO_READY                  12
 /*
  * The following 3 bits are meant only for the btree inode.
  * When any of them is set, it means an error happened while writing an
index 80a9aefb0c46fc179119d8ad67eb9f8bff687ac1..aac314e14188facff9ed95136eab647709352359 100644 (file)
@@ -1778,6 +1778,7 @@ struct btrfs_fs_info {
        spinlock_t unused_bgs_lock;
        struct list_head unused_bgs;
        struct mutex unused_bg_unpin_mutex;
+       struct mutex delete_unused_bgs_mutex;
 
        /* For btrfs to record security options */
        struct security_mnt_opts security_opts;
index 3f43bfea3684a13e378fbdd3d364c0635e14daca..a9aadb2ad5254cfe98d36a6eed1f12d4ae5e7925 100644 (file)
@@ -1751,6 +1751,7 @@ static int cleaner_kthread(void *arg)
 {
        struct btrfs_root *root = arg;
        int again;
+       struct btrfs_trans_handle *trans;
 
        do {
                again = 0;
@@ -1772,7 +1773,6 @@ static int cleaner_kthread(void *arg)
                }
 
                btrfs_run_delayed_iputs(root);
-               btrfs_delete_unused_bgs(root->fs_info);
                again = btrfs_clean_one_deleted_snapshot(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);
 
@@ -1781,6 +1781,16 @@ static int cleaner_kthread(void *arg)
                 * needn't do anything special here.
                 */
                btrfs_run_defrag_inodes(root->fs_info);
+
+               /*
+                * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
+                * with relocation (btrfs_relocate_chunk), since relocation
+                * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
+                * after acquiring fs_info->delete_unused_bgs_mutex. So we
+                * can't (and don't need to) hold fs_info->cleaner_mutex
+                * when deleting unused block groups.
+                */
+               btrfs_delete_unused_bgs(root->fs_info);
 sleep:
                if (!try_to_freeze() && !again) {
                        set_current_state(TASK_INTERRUPTIBLE);
@@ -1789,6 +1799,34 @@ sleep:
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
+
+       /*
+        * Transaction kthread is stopped before us and wakes us up.
+        * However we might have started a new transaction and COWed some
+        * tree blocks when deleting unused block groups for example. So
+        * make sure we commit the transaction we started to have a clean
+        * shutdown when evicting the btree inode - if it has dirty pages
+        * when we do the final iput() on it, eviction will trigger a
+        * writeback for it which will fail with null pointer dereferences
+        * since work queues and other resources were already released and
+        * destroyed by the time the iput/eviction/writeback is made.
+        */
+       trans = btrfs_attach_transaction(root);
+       if (IS_ERR(trans)) {
+               if (PTR_ERR(trans) != -ENOENT)
+                       btrfs_err(root->fs_info,
+                                 "cleaner transaction attach returned %ld",
+                                 PTR_ERR(trans));
+       } else {
+               int ret;
+
+               ret = btrfs_commit_transaction(trans, root);
+               if (ret)
+                       btrfs_err(root->fs_info,
+                                 "cleaner open transaction commit returned %d",
+                                 ret);
+       }
+
        return 0;
 }
 
@@ -2492,6 +2530,7 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->unused_bgs_lock);
        rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->unused_bg_unpin_mutex);
+       mutex_init(&fs_info->delete_unused_bgs_mutex);
        mutex_init(&fs_info->reloc_mutex);
        mutex_init(&fs_info->delalloc_root_mutex);
        seqlock_init(&fs_info->profiles_lock);
index 38b76cc02f486db1e9d7f887860aba8137d61205..1c2bd1723e40ce8d1390223daf2bf4a7871ac051 100644 (file)
@@ -9889,6 +9889,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                }
                spin_unlock(&fs_info->unused_bgs_lock);
 
+               mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+
                /* Don't want to race with allocators so take the groups_sem */
                down_write(&space_info->groups_sem);
                spin_lock(&block_group->lock);
@@ -9983,6 +9985,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 end_trans:
                btrfs_end_transaction(trans, root);
 next:
+               mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                btrfs_put_block_group(block_group);
                spin_lock(&fs_info->unused_bgs_lock);
        }
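
The new delete_unused_bgs_mutex serializes automatic block-group deletion against chunk relocation without nesting under cleaner_mutex, which (as the cleaner_kthread() comment above explains) would invert the lock order that relocation uses. In outline, assuming the relocation side takes the same mutex around btrfs_relocate_chunk() as this commit describes (that hunk is not shown in this excerpt):

    /* cleaner path (btrfs_delete_unused_bgs), per block group: */
    mutex_lock(&fs_info->delete_unused_bgs_mutex);
    /* ... delete one unused block group; cleaner_mutex NOT held ... */
    mutex_unlock(&fs_info->delete_unused_bgs_mutex);

    /* relocation path (balance / device shrink): */
    mutex_lock(&fs_info->delete_unused_bgs_mutex);
    /* btrfs_relocate_block_group() acquires fs_info->cleaner_mutex in here */
    mutex_unlock(&fs_info->delete_unused_bgs_mutex);
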
index f6a596d5a6374f1dbf66279e24ef17aed9ff399d..d4a582ac3f730f82299e997d0866e3caecacac7b 100644 (file)
@@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
 {
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+       spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 count;
@@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
                return;
 
        while (1) {
+               bool add_to_ctl = true;
+
+               spin_lock(rbroot_lock);
                n = rb_first(rbroot);
-               if (!n)
+               if (!n) {
+                       spin_unlock(rbroot_lock);
                        break;
+               }
 
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                BUG_ON(info->bitmap); /* Logic error */
 
                if (info->offset > root->ino_cache_progress)
-                       goto free;
+                       add_to_ctl = false;
                else if (info->offset + info->bytes > root->ino_cache_progress)
                        count = root->ino_cache_progress - info->offset + 1;
                else
                        count = info->bytes;
 
-               __btrfs_add_free_space(ctl, info->offset, count);
-free:
                rb_erase(&info->offset_index, rbroot);
-               kfree(info);
+               spin_unlock(rbroot_lock);
+               if (add_to_ctl)
+                       __btrfs_add_free_space(ctl, info->offset, count);
+               kmem_cache_free(btrfs_free_space_cachep, info);
        }
 }
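
The btrfs_unpin_free_ino() change illustrates the general pattern for destructively draining an rbtree that a concurrent writer may also modify: take the tree lock around rb_first() and rb_erase(), detach the node, drop the lock, and only then do the heavier work and the free. Sketched generically (the types and the consume() helper are illustrative):

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_item {
            struct rb_node node;
            /* ... payload ... */
    };

    static struct rb_root my_root = RB_ROOT;
    static DEFINE_SPINLOCK(my_tree_lock);

    void consume(struct my_item *item); /* heavier work, must run unlocked */

    static void drain_tree(void)
    {
            while (1) {
                    struct rb_node *n;
                    struct my_item *item;

                    spin_lock(&my_tree_lock);
                    n = rb_first(&my_root);
                    if (!n) {
                            spin_unlock(&my_tree_lock);
                            break;
                    }
                    item = rb_entry(n, struct my_item, node);
                    rb_erase(n, &my_root); /* detach while holding the lock */
                    spin_unlock(&my_tree_lock);

                    consume(item);
                    kfree(item);
            }
    }
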
 
index 855935f6671ae59b1b025c3916d553bc689191ef..b33c0cf02668bde4d3dbeb60d0b1f411d58c648d 100644 (file)
@@ -4989,8 +4989,9 @@ static void evict_inode_truncate_pages(struct inode *inode)
        /*
         * Keep looping until we have no more ranges in the io tree.
         * We can have ongoing bios started by readpages (called from readahead)
-        * that didn't get their end io callbacks called yet or they are still
-        * in progress ((extent_io.c:end_bio_extent_readpage()). This means some
+        * that have their endio callback (extent_io.c:end_bio_extent_readpage)
+        * still in progress (they unlocked the pages in the bio but did not
+        * yet unlock the ranges in the io tree). This means some
         * ranges can still be locked and eviction started because before
         * submitting those bios, which are executed by a separate task (work
         * queue kthread), inode references (inode->i_count) were not taken
@@ -7546,6 +7547,7 @@ unlock:
 
                current->journal_info = outstanding_extents;
                btrfs_free_reserved_data_space(inode, len);
+               set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
        }
 
        /*
@@ -7871,8 +7873,6 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
        struct bio *dio_bio;
        int ret;
 
-       if (err)
-               goto out_done;
 again:
        ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                   &ordered_offset,
@@ -7895,7 +7895,6 @@ out_test:
                ordered = NULL;
                goto again;
        }
-out_done:
        dio_bio = dip->dio_bio;
 
        kfree(dip);
@@ -8163,9 +8162,8 @@ out_err:
 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
                                struct inode *inode, loff_t file_offset)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_dio_private *dip;
-       struct bio *io_bio;
+       struct btrfs_dio_private *dip = NULL;
+       struct bio *io_bio = NULL;
        struct btrfs_io_bio *btrfs_bio;
        int skip_sum;
        int write = rw & REQ_WRITE;
@@ -8182,7 +8180,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
        dip = kzalloc(sizeof(*dip), GFP_NOFS);
        if (!dip) {
                ret = -ENOMEM;
-               goto free_io_bio;
+               goto free_ordered;
        }
 
        dip->private = dio_bio->bi_private;
@@ -8210,25 +8208,55 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 
        if (btrfs_bio->end_io)
                btrfs_bio->end_io(btrfs_bio, ret);
-free_io_bio:
-       bio_put(io_bio);
 
 free_ordered:
        /*
-        * If this is a write, we need to clean up the reserved space and kill
-        * the ordered extent.
+        * If we arrived here, it means we failed to submit the dip, failed
+        * to clone the dio_bio, or failed to allocate the dip. If we cloned
+        * the dio_bio and allocated the dip, we can just call bio_endio
+        * against our io_bio so that we get proper resource cleanup if we
+        * fail to submit the dip. Otherwise, we must do the same cleanup as
+        * btrfs_endio_direct_[write|read] would, because we can't call those
+        * callbacks - they require an allocated dip and a clone of dio_bio.
         */
-       if (write) {
-               struct btrfs_ordered_extent *ordered;
-               ordered = btrfs_lookup_ordered_extent(inode, file_offset);
-               if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
-                   !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
-                       btrfs_free_reserved_extent(root, ordered->start,
-                                                  ordered->disk_len, 1);
-               btrfs_put_ordered_extent(ordered);
-               btrfs_put_ordered_extent(ordered);
+       if (io_bio && dip) {
+               bio_endio(io_bio, ret);
+               /*
+                * The end io callbacks free our dip, do the final put on io_bio
+                * and all the cleanup and final put for dio_bio (through
+                * dio_end_io()).
+                */
+               dip = NULL;
+               io_bio = NULL;
+       } else {
+               if (write) {
+                       struct btrfs_ordered_extent *ordered;
+
+                       ordered = btrfs_lookup_ordered_extent(inode,
+                                                             file_offset);
+                       set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+                       /*
+                        * Decrements our ref on the ordered extent and removes
+                        * the ordered extent from the inode's ordered tree,
+                        * doing all the proper resource cleanup such as for the
+                        * reserved space and waking up any waiters for this
+                        * ordered extent (through btrfs_remove_ordered_extent).
+                        */
+                       btrfs_finish_ordered_io(ordered);
+               } else {
+                       unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
+                             file_offset + dio_bio->bi_iter.bi_size - 1);
+               }
+               clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+               /*
+                * Releases and cleans up our dio_bio, no need to bio_put()
+                * nor bio_endio()/bio_io_error() against dio_bio.
+                */
+               dio_end_io(dio_bio, ret);
        }
-       bio_endio(dio_bio, ret);
+       if (io_bio)
+               bio_put(io_bio);
+       kfree(dip);
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
@@ -8330,9 +8358,18 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                   btrfs_submit_direct, flags);
        if (iov_iter_rw(iter) == WRITE) {
                current->journal_info = NULL;
-               if (ret < 0 && ret != -EIOCBQUEUED)
-                       btrfs_delalloc_release_space(inode, count);
-               else if (ret >= 0 && (size_t)ret < count)
+               if (ret < 0 && ret != -EIOCBQUEUED) {
+                       /*
+                        * If the error comes from the submission stage,
+                        * btrfs_get_blocks_direct() has already freed the
+                        * data space, and metadata space will be handled
+                        * by finish_ordered_fn; don't free it again, so
+                        * that bytes_may_use stays correct.
+                        */
+                       if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
+                                    &BTRFS_I(inode)->runtime_flags))
+                               btrfs_delalloc_release_space(inode, count);
+               } else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode,
                                                     count - (size_t)ret);
        }
index c86b835da7a8739ec16396d589ccb651278ab44d..5d91776e12a215cddf666a2a1674a5ff5749c92e 100644 (file)
@@ -87,7 +87,8 @@ struct btrfs_ioctl_received_subvol_args_32 {
 
 
 static int btrfs_clone(struct inode *src, struct inode *inode,
-                      u64 off, u64 olen, u64 olen_aligned, u64 destoff);
+                      u64 off, u64 olen, u64 olen_aligned, u64 destoff,
+                      int no_time_update);
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -2765,14 +2766,11 @@ out:
        return ret;
 }
 
-static struct page *extent_same_get_page(struct inode *inode, u64 off)
+static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
 {
        struct page *page;
-       pgoff_t index;
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 
-       index = off >> PAGE_CACHE_SHIFT;
-
        page = grab_cache_page(inode->i_mapping, index);
        if (!page)
                return NULL;
@@ -2793,6 +2791,20 @@ static struct page *extent_same_get_page(struct inode *inode, u64 off)
        return page;
 }
 
+static int gather_extent_pages(struct inode *inode, struct page **pages,
+                              int num_pages, u64 off)
+{
+       int i;
+       pgoff_t index = off >> PAGE_CACHE_SHIFT;
+
+       for (i = 0; i < num_pages; i++) {
+               pages[i] = extent_same_get_page(inode, index + i);
+               if (!pages[i])
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
 static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
 {
        /* do any pending delalloc/csum calc on src, one way or
@@ -2818,52 +2830,120 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
        }
 }
 
-static void btrfs_double_unlock(struct inode *inode1, u64 loff1,
-                               struct inode *inode2, u64 loff2, u64 len)
+static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
 {
-       unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
-       unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
-
        mutex_unlock(&inode1->i_mutex);
        mutex_unlock(&inode2->i_mutex);
 }
 
-static void btrfs_double_lock(struct inode *inode1, u64 loff1,
-                             struct inode *inode2, u64 loff2, u64 len)
+static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
+{
+       if (inode1 < inode2)
+               swap(inode1, inode2);
+
+       mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+       if (inode1 != inode2)
+               mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+}
+
+static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
+                                     struct inode *inode2, u64 loff2, u64 len)
+{
+       unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+       unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
+static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+                                    struct inode *inode2, u64 loff2, u64 len)
 {
        if (inode1 < inode2) {
                swap(inode1, inode2);
                swap(loff1, loff2);
        }
-
-       mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
        lock_extent_range(inode1, loff1, len);
-       if (inode1 != inode2) {
-               mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+       if (inode1 != inode2)
                lock_extent_range(inode2, loff2, len);
+}
+
+struct cmp_pages {
+       int             num_pages;
+       struct page     **src_pages;
+       struct page     **dst_pages;
+};
+
+static void btrfs_cmp_data_free(struct cmp_pages *cmp)
+{
+       int i;
+       struct page *pg;
+
+       for (i = 0; i < cmp->num_pages; i++) {
+               pg = cmp->src_pages[i];
+               if (pg)
+                       page_cache_release(pg);
+               pg = cmp->dst_pages[i];
+               if (pg)
+                       page_cache_release(pg);
+       }
+       kfree(cmp->src_pages);
+       kfree(cmp->dst_pages);
+}
+
+static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
+                                 struct inode *dst, u64 dst_loff,
+                                 u64 len, struct cmp_pages *cmp)
+{
+       int ret;
+       int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+       struct page **src_pgarr, **dst_pgarr;
+
+       /*
+        * We must gather up all the pages before we initiate our
+        * extent locking. We use an array for the page pointers. Size
+        * of the array is bounded by len, which is in turn bounded by
+        * BTRFS_MAX_DEDUPE_LEN.
+        */
+       src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
+       dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
+       if (!src_pgarr || !dst_pgarr) {
+               kfree(src_pgarr);
+               kfree(dst_pgarr);
+               return -ENOMEM;
        }
+       cmp->num_pages = num_pages;
+       cmp->src_pages = src_pgarr;
+       cmp->dst_pages = dst_pgarr;
+
+       ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
+       if (ret)
+               goto out;
+
+       ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
+
+out:
+       if (ret)
+               btrfs_cmp_data_free(cmp);
+       return ret;
 }
 
 static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
-                         u64 dst_loff, u64 len)
+                         u64 dst_loff, u64 len, struct cmp_pages *cmp)
 {
        int ret = 0;
+       int i;
        struct page *src_page, *dst_page;
        unsigned int cmp_len = PAGE_CACHE_SIZE;
        void *addr, *dst_addr;
 
+       i = 0;
        while (len) {
                if (len < PAGE_CACHE_SIZE)
                        cmp_len = len;
 
-               src_page = extent_same_get_page(src, loff);
-               if (!src_page)
-                       return -EINVAL;
-               dst_page = extent_same_get_page(dst, dst_loff);
-               if (!dst_page) {
-                       page_cache_release(src_page);
-                       return -EINVAL;
-               }
+               BUG_ON(i >= cmp->num_pages);
+
+               src_page = cmp->src_pages[i];
+               dst_page = cmp->dst_pages[i];
+
                addr = kmap_atomic(src_page);
                dst_addr = kmap_atomic(dst_page);
 
@@ -2875,15 +2955,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
 
                kunmap_atomic(addr);
                kunmap_atomic(dst_addr);
-               page_cache_release(src_page);
-               page_cache_release(dst_page);
 
                if (ret)
                        break;
 
-               loff += cmp_len;
-               dst_loff += cmp_len;
                len -= cmp_len;
+               i++;
        }
 
        return ret;
@@ -2914,27 +2991,62 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
 {
        int ret;
        u64 len = olen;
+       struct cmp_pages cmp;
+       int same_inode = 0;
+       u64 same_lock_start = 0;
+       u64 same_lock_len = 0;
 
-       /*
-        * btrfs_clone() can't handle extents in the same file
-        * yet. Once that works, we can drop this check and replace it
-        * with a check for the same inode, but overlapping extents.
-        */
        if (src == dst)
-               return -EINVAL;
+               same_inode = 1;
 
        if (len == 0)
                return 0;
 
-       btrfs_double_lock(src, loff, dst, dst_loff, len);
+       if (same_inode) {
+               mutex_lock(&src->i_mutex);
 
-       ret = extent_same_check_offsets(src, loff, &len, olen);
-       if (ret)
-               goto out_unlock;
+               ret = extent_same_check_offsets(src, loff, &len, olen);
+               if (ret)
+                       goto out_unlock;
 
-       ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
-       if (ret)
-               goto out_unlock;
+               /*
+                * Single inode case wants the same checks, except we
+                * don't want our length pushed out past i_size as
+                * comparing that data range makes no sense.
+                *
+                * extent_same_check_offsets() will do this for an
+                * unaligned length at i_size, so catch it here and
+                * reject the request.
+                *
+                * This effectively means we require aligned extents
+                * for the single-inode case, whereas the other cases
+                * allow an unaligned length so long as it ends at
+                * i_size.
+                */
+               if (len != olen) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               /* Check for overlapping ranges */
+               if (dst_loff + len > loff && dst_loff < loff + len) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               same_lock_start = min_t(u64, loff, dst_loff);
+               same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+       } else {
+               btrfs_double_inode_lock(src, dst);
+
+               ret = extent_same_check_offsets(src, loff, &len, olen);
+               if (ret)
+                       goto out_unlock;
+
+               ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
+               if (ret)
+                       goto out_unlock;
+       }
 
        /* don't make the dst file partly checksummed */
        if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
@@ -2943,12 +3055,32 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                goto out_unlock;
        }
 
-       ret = btrfs_cmp_data(src, loff, dst, dst_loff, len);
+       ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
+       if (ret)
+               goto out_unlock;
+
+       if (same_inode)
+               lock_extent_range(src, same_lock_start, same_lock_len);
+       else
+               btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+
+       /* pass original length for comparison so we stay within i_size */
+       ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
        if (ret == 0)
-               ret = btrfs_clone(src, dst, loff, olen, len, dst_loff);
+               ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
+
+       if (same_inode)
+               unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
+                             same_lock_start + same_lock_len - 1);
+       else
+               btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
 
+       btrfs_cmp_data_free(&cmp);
 out_unlock:
-       btrfs_double_unlock(src, loff, dst, dst_loff, len);
+       if (same_inode)
+               mutex_unlock(&src->i_mutex);
+       else
+               btrfs_double_inode_unlock(src, dst);
 
        return ret;
 }
@@ -3100,13 +3232,15 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     u64 endoff,
                                     const u64 destoff,
-                                    const u64 olen)
+                                    const u64 olen,
+                                    int no_time_update)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
 
        inode_inc_iversion(inode);
-       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       if (!no_time_update)
+               inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        /*
         * We round up to the block size at eof when determining which
         * extents to clone above, but shouldn't round up the file size.
@@ -3191,13 +3325,13 @@ static void clone_update_extent_map(struct inode *inode,
  * @inode: Inode to clone to
  * @off: Offset within source to start clone from
  * @olen: Original length, passed by user, of range to clone
- * @olen_aligned: Block-aligned value of olen, extent_same uses
- *               identical values here
+ * @olen_aligned: Block-aligned value of olen
  * @destoff: Offset within @inode to start clone
+ * @no_time_update: Whether to update mtime/ctime on the target inode
  */
 static int btrfs_clone(struct inode *src, struct inode *inode,
                       const u64 off, const u64 olen, const u64 olen_aligned,
-                      const u64 destoff)
+                      const u64 destoff, int no_time_update)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_path *path = NULL;
@@ -3521,7 +3655,8 @@ process_slot:
                                              root->sectorsize);
                        ret = clone_finish_inode_update(trans, inode,
                                                        last_dest_end,
-                                                       destoff, olen);
+                                                       destoff, olen,
+                                                       no_time_update);
                        if (ret)
                                goto out;
                        if (new_key.offset + datal >= destoff + len)
@@ -3559,7 +3694,7 @@ process_slot:
                clone_update_extent_map(inode, trans, NULL, last_dest_end,
                                        destoff + len - last_dest_end);
                ret = clone_finish_inode_update(trans, inode, destoff + len,
-                                               destoff, olen);
+                                               destoff, olen, no_time_update);
        }
 
 out:
@@ -3696,7 +3831,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                lock_extent_range(inode, destoff, len);
        }
 
-       ret = btrfs_clone(src, inode, off, olen, len, destoff);
+       ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
 
        if (same_inode) {
                u64 lock_start = min_t(u64, off, destoff);
index 89656d799ff6fa3772b52cb17d535ebdc1cbb910..52170cf1757e3d25192a6cf6211a9d2edaa52751 100644 (file)
@@ -552,6 +552,10 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
        trace_btrfs_ordered_extent_put(entry->inode, entry);
 
        if (atomic_dec_and_test(&entry->refs)) {
+               ASSERT(list_empty(&entry->log_list));
+               ASSERT(list_empty(&entry->trans_list));
+               ASSERT(list_empty(&entry->root_extent_list));
+               ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
@@ -579,6 +583,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
+       RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
index d5f1f033b7a00f3c2e2826e336ae5ca5b3e31d4a..e9ace099162ce14d73d04523962465ee163b30c7 100644 (file)
@@ -1349,6 +1349,11 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
+       /* Sometimes we want to clear the limit on this qgroup.
+        * To meet this requirement, we treat -1 as a special value
+        * which tells the kernel to clear the limit on this qgroup.
+        */
+       const u64 CLEAR_VALUE = -1;
 
        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
@@ -1364,14 +1369,42 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
        }
 
        spin_lock(&fs_info->qgroup_lock);
-       if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
-               qgroup->max_rfer = limit->max_rfer;
-       if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
-               qgroup->max_excl = limit->max_excl;
-       if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
-               qgroup->rsv_rfer = limit->rsv_rfer;
-       if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
-               qgroup->rsv_excl = limit->rsv_excl;
+       if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
+               if (limit->max_rfer == CLEAR_VALUE) {
+                       qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
+                       limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
+                       qgroup->max_rfer = 0;
+               } else {
+                       qgroup->max_rfer = limit->max_rfer;
+               }
+       }
+       if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
+               if (limit->max_excl == CLEAR_VALUE) {
+                       qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
+                       limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
+                       qgroup->max_excl = 0;
+               } else {
+                       qgroup->max_excl = limit->max_excl;
+               }
+       }
+       if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
+               if (limit->rsv_rfer == CLEAR_VALUE) {
+                       qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
+                       limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
+                       qgroup->rsv_rfer = 0;
+               } else {
+                       qgroup->rsv_rfer = limit->rsv_rfer;
+               }
+       }
+       if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
+               if (limit->rsv_excl == CLEAR_VALUE) {
+                       qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
+                       limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
+                       qgroup->rsv_excl = 0;
+               } else {
+                       qgroup->rsv_excl = limit->rsv_excl;
+               }
+       }
        qgroup->lim_flags |= limit->flags;
 
        spin_unlock(&fs_info->qgroup_lock);
index 827951fbf7fcb7dac026e48ded6e29f2f723adef..88cbb5995667951192b94c2627f759a771eda9e3 100644 (file)
@@ -4049,7 +4049,7 @@ restart:
        if (trans && progress && err == -ENOSPC) {
                ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
                                              rc->block_group->flags);
-               if (ret == 0) {
+               if (ret == 1) {
                        err = 0;
                        progress = 0;
                        goto restart;
index 9f2feabe99f211f9c8eb5d489c44c85f54754c27..94db0fa5225a9fd493163fc117cd25d8b01b7343 100644 (file)
@@ -3571,7 +3571,6 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                int is_dev_replace)
 {
-       int ret = 0;
        unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
        int max_active = fs_info->thread_pool_size;
 
@@ -3584,34 +3583,36 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                        fs_info->scrub_workers =
                                btrfs_alloc_workqueue("btrfs-scrub", flags,
                                                      max_active, 4);
-               if (!fs_info->scrub_workers) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!fs_info->scrub_workers)
+                       goto fail_scrub_workers;
+
                fs_info->scrub_wr_completion_workers =
                        btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
                                              max_active, 2);
-               if (!fs_info->scrub_wr_completion_workers) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!fs_info->scrub_wr_completion_workers)
+                       goto fail_scrub_wr_completion_workers;
+
                fs_info->scrub_nocow_workers =
                        btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
-               if (!fs_info->scrub_nocow_workers) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!fs_info->scrub_nocow_workers)
+                       goto fail_scrub_nocow_workers;
                fs_info->scrub_parity_workers =
                        btrfs_alloc_workqueue("btrfs-scrubparity", flags,
                                              max_active, 2);
-               if (!fs_info->scrub_parity_workers) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!fs_info->scrub_parity_workers)
+                       goto fail_scrub_parity_workers;
        }
        ++fs_info->scrub_workers_refcnt;
-out:
-       return ret;
+       return 0;
+
+fail_scrub_parity_workers:
+       btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
+fail_scrub_nocow_workers:
+       btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+fail_scrub_wr_completion_workers:
+       btrfs_destroy_workqueue(fs_info->scrub_workers);
+fail_scrub_workers:
+       return -ENOMEM;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
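
The conversion above replaces a shared "out:" label and a ret variable with the common kernel idiom of one failure label per allocated resource, falling through the labels so exactly what was set up gets torn down, in reverse order. A standalone sketch of the same idiom, using made-up wq_alloc()/wq_free() stand-ins for the workqueue calls:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for btrfs_alloc_workqueue()/btrfs_destroy_workqueue(). */
struct wq { int dummy; };

static struct wq *wq_alloc(const char *name)
{
	(void)name;
	return malloc(sizeof(struct wq));
}

static void wq_free(struct wq *wq)
{
	free(wq);
}

/*
 * Allocate three resources; on failure, fall through the labels so that
 * only the resources already set up are torn down, in reverse order.
 */
static int workers_get(struct wq **a, struct wq **b, struct wq **c)
{
	*a = wq_alloc("a");
	if (!*a)
		goto fail_a;
	*b = wq_alloc("b");
	if (!*b)
		goto fail_b;
	*c = wq_alloc("c");
	if (!*c)
		goto fail_c;
	return 0;

fail_c:
	wq_free(*b);
fail_b:
	wq_free(*a);
fail_a:
	return -1;
}

int main(void)
{
	struct wq *a, *b, *c;

	if (workers_get(&a, &b, &c) == 0) {
		puts("all workqueues allocated");
		wq_free(c);
		wq_free(b);
		wq_free(a);
	}
	return 0;
}
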
index 1ce80c1c4eb6c1a414cfe88009c5c1c0e5c14132..9c45431e69aba3a7a8e2d0bcfb8c3b4a18182df2 100644 (file)
@@ -4117,6 +4117,187 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
        return 0;
 }
 
+/*
+ * At the moment we always log all xattrs, so that at log replay time we can
+ * figure out which xattr deletions must be replayed: if an xattr is missing
+ * from the log tree but exists in the fs/subvol tree, we delete it. This
+ * guarantees that if an xattr is deleted, the inode is fsynced and a power
+ * failure happens, the xattr no longer exists after the log is replayed on
+ * the next mount (the same behaviour as other filesystems with a journal,
+ * such as ext3/4, xfs and f2fs).
+ */
+static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root,
+                               struct inode *inode,
+                               struct btrfs_path *path,
+                               struct btrfs_path *dst_path)
+{
+       int ret;
+       struct btrfs_key key;
+       const u64 ino = btrfs_ino(inode);
+       int ins_nr = 0;
+       int start_slot = 0;
+
+       key.objectid = ino;
+       key.type = BTRFS_XATTR_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               return ret;
+
+       while (true) {
+               int slot = path->slots[0];
+               struct extent_buffer *leaf = path->nodes[0];
+               int nritems = btrfs_header_nritems(leaf);
+
+               if (slot >= nritems) {
+                       if (ins_nr > 0) {
+                               u64 last_extent = 0;
+
+                               ret = copy_items(trans, inode, dst_path, path,
+                                                &last_extent, start_slot,
+                                                ins_nr, 1, 0);
+                               /* can't be 1, extent items aren't processed */
+                               ASSERT(ret <= 0);
+                               if (ret < 0)
+                                       return ret;
+                               ins_nr = 0;
+                       }
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               return ret;
+                       else if (ret > 0)
+                               break;
+                       continue;
+               }
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
+                       break;
+
+               if (ins_nr == 0)
+                       start_slot = slot;
+               ins_nr++;
+               path->slots[0]++;
+               cond_resched();
+       }
+       if (ins_nr > 0) {
+               u64 last_extent = 0;
+
+               ret = copy_items(trans, inode, dst_path, path,
+                                &last_extent, start_slot,
+                                ins_nr, 1, 0);
+               /* can't be 1, extent items aren't processed */
+               ASSERT(ret <= 0);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
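
The behaviour described in the comment above can be seen from userspace with an ordinary set/delete/fsync sequence on an xattr; the file path here is made up. If the machine crashes after the second fsync() returns, log replay on the next mount must leave the xattr deleted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
	/* made-up path on a btrfs mount */
	int fd = open("/mnt/test/file", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Set an xattr and persist it. */
	if (fsetxattr(fd, "user.demo", "1", 1, 0) != 0)
		perror("fsetxattr");
	if (fsync(fd) != 0)
		perror("fsync");

	/* Delete it and fsync again: after a crash beyond this point,
	 * log replay must leave the xattr deleted. */
	if (fremovexattr(fd, "user.demo") != 0)
		perror("fremovexattr");
	if (fsync(fd) != 0)
		perror("fsync");

	close(fd);
	return 0;
}
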
+/*
+ * If the no holes feature is enabled we need to make sure any hole between the
+ * last extent and the i_size of our inode is explicitly marked in the log. This
+ * is to make sure that doing something like:
+ *
+ *      1) create file with 128Kb of data
+ *      2) truncate file to 64Kb
+ *      3) truncate file to 256Kb
+ *      4) fsync file
+ *      5) <crash/power failure>
+ *      6) mount fs and trigger log replay
+ *
+ * will give us a file with a size of 256Kb: the first 64Kb of data match
+ * what the file had at step 1, and the last 192Kb of the file correspond to
+ * a hole. The presence of explicit holes in a log tree is what guarantees
+ * that log replay will remove/adjust file extent items in the fs/subvol
+ * tree.
+ *
+ * Here we do not need to care about holes between extents; that is already
+ * handled by copy_items(). We also only need to do this in the full sync
+ * path, where we look up extents from the fs/subvol tree only. In the fast
+ * path we instead walk the list of modified extent maps, and if any of them
+ * represents a hole, we insert a corresponding file extent representing a
+ * hole in the log tree.
+ */
+static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root,
+                                  struct inode *inode,
+                                  struct btrfs_path *path)
+{
+       int ret;
+       struct btrfs_key key;
+       u64 hole_start;
+       u64 hole_size;
+       struct extent_buffer *leaf;
+       struct btrfs_root *log = root->log_root;
+       const u64 ino = btrfs_ino(inode);
+       const u64 i_size = i_size_read(inode);
+
+       if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
+               return 0;
+
+       key.objectid = ino;
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = (u64)-1;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       ASSERT(ret != 0);
+       if (ret < 0)
+               return ret;
+
+       ASSERT(path->slots[0] > 0);
+       path->slots[0]--;
+       leaf = path->nodes[0];
+       btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+       if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
+               /* inode does not have any extents */
+               hole_start = 0;
+               hole_size = i_size;
+       } else {
+               struct btrfs_file_extent_item *extent;
+               u64 len;
+
+               /*
+                * If there's an extent beyond i_size, an explicit hole was
+                * already inserted by copy_items().
+                */
+               if (key.offset >= i_size)
+                       return 0;
+
+               extent = btrfs_item_ptr(leaf, path->slots[0],
+                                       struct btrfs_file_extent_item);
+
+               if (btrfs_file_extent_type(leaf, extent) ==
+                   BTRFS_FILE_EXTENT_INLINE) {
+                       len = btrfs_file_extent_inline_len(leaf,
+                                                          path->slots[0],
+                                                          extent);
+                       ASSERT(len == i_size);
+                       return 0;
+               }
+
+               len = btrfs_file_extent_num_bytes(leaf, extent);
+               /* Last extent goes beyond i_size, no need to log a hole. */
+               if (key.offset + len > i_size)
+                       return 0;
+               hole_start = key.offset + len;
+               hole_size = i_size - hole_start;
+       }
+       btrfs_release_path(path);
+
+       /* Last extent ends at i_size. */
+       if (hole_size == 0)
+               return 0;
+
+       hole_size = ALIGN(hole_size, root->sectorsize);
+       ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
+                                      hole_size, 0, hole_size, 0, 0, 0);
+       return ret;
+}
+
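
Steps 1-4 of the scenario in the comment translate directly into a small userspace program; the mount point is made up. With the NO_HOLES feature enabled, the hole logged by btrfs_log_trailing_hole() is what keeps the file's tail sparse across a crash at step 5:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int i;
	/* made-up path on a btrfs mount */
	int fd = open("/mnt/test/file", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(buf, 0xaa, sizeof(buf));
	for (i = 0; i < 32; i++)	/* 1) 128Kb of data */
		if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
			perror("write");
			return 1;
		}

	if (ftruncate(fd, 64 * 1024) != 0)	/* 2) truncate to 64Kb */
		perror("ftruncate");
	if (ftruncate(fd, 256 * 1024) != 0)	/* 3) truncate to 256Kb */
		perror("ftruncate");
	if (fsync(fd) != 0)			/* 4) fsync */
		perror("fsync");

	/* 5)/6) a crash past this point, followed by log replay on the
	 * next mount, must yield a 256Kb file whose last 192Kb are a hole. */
	close(fd);
	return 0;
}
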
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -4155,6 +4336,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        u64 ino = btrfs_ino(inode);
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 logged_isize = 0;
+       bool need_log_inode_item = true;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4263,11 +4445,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                } else {
                        if (inode_only == LOG_INODE_ALL)
                                fast_search = true;
-                       ret = log_inode_item(trans, log, dst_path, inode);
-                       if (ret) {
-                               err = ret;
-                               goto out_unlock;
-                       }
                        goto log_extents;
                }
 
@@ -4290,6 +4467,28 @@ again:
                if (min_key.type > max_key.type)
                        break;
 
+               if (min_key.type == BTRFS_INODE_ITEM_KEY)
+                       need_log_inode_item = false;
+
+               /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+               if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+                       if (ins_nr == 0)
+                               goto next_slot;
+                       ret = copy_items(trans, inode, dst_path, path,
+                                        &last_extent, ins_start_slot,
+                                        ins_nr, inode_only, logged_isize);
+                       if (ret < 0) {
+                               err = ret;
+                               goto out_unlock;
+                       }
+                       ins_nr = 0;
+                       if (ret) {
+                               btrfs_release_path(path);
+                               continue;
+                       }
+                       goto next_slot;
+               }
+
                src = path->nodes[0];
                if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
                        ins_nr++;
@@ -4357,9 +4556,26 @@ next_slot:
                ins_nr = 0;
        }
 
+       btrfs_release_path(path);
+       btrfs_release_path(dst_path);
+       err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
+       if (err)
+               goto out_unlock;
+       if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
+               btrfs_release_path(path);
+               btrfs_release_path(dst_path);
+               err = btrfs_log_trailing_hole(trans, root, inode, path);
+               if (err)
+                       goto out_unlock;
+       }
 log_extents:
        btrfs_release_path(path);
        btrfs_release_path(dst_path);
+       if (need_log_inode_item) {
+               err = log_inode_item(trans, log, dst_path, inode);
+               if (err)
+                       goto out_unlock;
+       }
        if (fast_search) {
                /*
                 * Some ordered extents started by fsync might have completed
index 4b438b4c8c9195b3aaf11519339719efbef7b21b..fbe7c104531c9e5eaf8b347f46d0b8a783f88322 100644 (file)
@@ -2766,6 +2766,20 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
 
+       /*
+        * Prevent races with automatic removal of unused block groups.
+        * After we relocate and before we remove the chunk with offset
+        * chunk_offset, automatic removal of the block group can kick in,
+        * resulting in a failure when calling btrfs_remove_chunk() below.
+        *
+        * Make sure to acquire this mutex before doing a tree search (dev
+        * or chunk trees) to find chunks. Otherwise the cleaner kthread might
+        * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
+        * we release the path used to search the chunk/dev tree and before
+        * the current task acquires this mutex and calls us.
+        */
+       ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
+
        ret = btrfs_can_relocate(extent_root, chunk_offset);
        if (ret)
                return -ENOSPC;
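
The locking protocol described in the comment (take delete_unused_bgs_mutex before searching for the chunk, so the cleaner thread cannot delete it between the search and its use) can be sketched in plain pthreads. Everything here is a toy stand-in rather than btrfs code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bgs_mutex = PTHREAD_MUTEX_INITIALIZER;
static int chunk_present = 1;

/* Toy "cleaner" that deletes the chunk whenever it can take the mutex. */
static void *cleaner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&bgs_mutex);
	chunk_present = 0;
	pthread_mutex_unlock(&bgs_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cleaner, NULL);

	/*
	 * Relocator: take the mutex *before* searching for the chunk, so
	 * the cleaner cannot delete it between the search and its use.
	 */
	pthread_mutex_lock(&bgs_mutex);
	if (chunk_present)
		puts("chunk found, relocating");
	else
		puts("chunk already removed, nothing to do");
	pthread_mutex_unlock(&bgs_mutex);

	pthread_join(t, NULL);
	return 0;
}
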
@@ -2814,13 +2828,18 @@ again:
        key.type = BTRFS_CHUNK_ITEM_KEY;
 
        while (1) {
+               mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
-               if (ret < 0)
+               if (ret < 0) {
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                        goto error;
+               }
                BUG_ON(ret == 0); /* Corruption */
 
                ret = btrfs_previous_item(chunk_root, path, key.objectid,
                                          key.type);
+               if (ret)
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                if (ret < 0)
                        goto error;
                if (ret > 0)
@@ -2843,6 +2862,7 @@ again:
                        else
                                BUG_ON(ret);
                }
+               mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
 
                if (found_key.offset == 0)
                        break;
@@ -3299,9 +3319,12 @@ again:
                        goto error;
                }
 
+               mutex_lock(&fs_info->delete_unused_bgs_mutex);
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
-               if (ret < 0)
+               if (ret < 0) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                        goto error;
+               }
 
                /*
                 * this shouldn't happen, it means the last relocate
@@ -3313,6 +3336,7 @@ again:
                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
                if (ret) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                        ret = 0;
                        break;
                }
@@ -3321,8 +3345,10 @@ again:
                slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
-               if (found_key.objectid != key.objectid)
+               if (found_key.objectid != key.objectid) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                        break;
+               }
 
                chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
 
@@ -3335,10 +3361,13 @@ again:
                ret = should_balance_chunk(chunk_root, leaf, chunk,
                                           found_key.offset);
                btrfs_release_path(path);
-               if (!ret)
+               if (!ret) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                        goto loop;
+               }
 
                if (counting) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                        spin_lock(&fs_info->balance_lock);
                        bctl->stat.expected++;
                        spin_unlock(&fs_info->balance_lock);
@@ -3348,6 +3377,7 @@ again:
                ret = btrfs_relocate_chunk(chunk_root,
                                           found_key.objectid,
                                           found_key.offset);
+               mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                if (ret && ret != -ENOSPC)
                        goto error;
                if (ret == -ENOSPC) {
@@ -4087,11 +4117,16 @@ again:
        key.type = BTRFS_DEV_EXTENT_KEY;
 
        do {
+               mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-               if (ret < 0)
+               if (ret < 0) {
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                        goto done;
+               }
 
                ret = btrfs_previous_item(root, path, 0, key.type);
+               if (ret)
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                if (ret < 0)
                        goto done;
                if (ret) {
@@ -4105,6 +4140,7 @@ again:
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
                if (key.objectid != device->devid) {
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                        btrfs_release_path(path);
                        break;
                }
@@ -4113,6 +4149,7 @@ again:
                length = btrfs_dev_extent_length(l, dev_extent);
 
                if (key.offset + length <= new_size) {
+                       mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                        btrfs_release_path(path);
                        break;
                }
@@ -4122,6 +4159,7 @@ again:
                btrfs_release_path(path);
 
                ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
+               mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
                if (ret && ret != -ENOSPC)
                        goto done;
                if (ret == -ENOSPC)
@@ -5715,7 +5753,6 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
 static void btrfs_end_bio(struct bio *bio, int err)
 {
        struct btrfs_bio *bbio = bio->bi_private;
-       struct btrfs_device *dev = bbio->stripes[0].dev;
        int is_orig_bio = 0;
 
        if (err) {
@@ -5723,6 +5760,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                if (err == -EIO || err == -EREMOTEIO) {
                        unsigned int stripe_index =
                                btrfs_io_bio(bio)->stripe_index;
+                       struct btrfs_device *dev;
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
index 6b8e2f091f5b8fd71d63d545ea5fe9ae593d7688..48851f6ea6ecc9af8c54b9e664909c878dc26bb7 100644 (file)
@@ -896,6 +896,7 @@ COMPATIBLE_IOCTL(FIGETBSZ)
 /* 'X' - originally XFS but some now in the VFS */
 COMPATIBLE_IOCTL(FIFREEZE)
 COMPATIBLE_IOCTL(FITHAW)
+COMPATIBLE_IOCTL(FITRIM)
 COMPATIBLE_IOCTL(KDGETKEYCODE)
 COMPATIBLE_IOCTL(KDSETKEYCODE)
 COMPATIBLE_IOCTL(KDGKBTYPE)
index 7a3f3e5f9ceabfc4cad41a5d5478258f7e817dc1..5c8ea15e73a53b6b6dbe3e9660973d2eda9c7800 100644 (file)
@@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
 
        /*
        * If we have a d_op->d_delete() operation, we should not
-        * let the dentry count go to zero, so use "put__or_lock".
+        * let the dentry count go to zero, so use "put_or_lock".
         */
        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
                return lockref_put_or_lock(&dentry->d_lockref);
@@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
         */
        smp_rmb();
        d_flags = ACCESS_ONCE(dentry->d_flags);
-       d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+       d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 
        /* Nothing to do? Dropping the reference was all we needed? */
        if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -776,6 +776,9 @@ repeat:
        if (unlikely(d_unhashed(dentry)))
                goto kill_it;
 
+       if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+               goto kill_it;
+
        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
                if (dentry->d_op->d_delete(dentry))
                        goto kill_it;
index 72afcc629d7b1fee2963fb90e1691a221eb77d8e..feef8a9c4de7cf09bcbc0effb45f83c207c9015b 100644 (file)
@@ -325,7 +325,6 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return rc;
 
        switch (cmd) {
-       case FITRIM:
        case FS_IOC32_GETFLAGS:
        case FS_IOC32_SETFLAGS:
        case FS_IOC32_GETVERSION:
index aadb7282883493597f8dae099f20c3e86694bea8..2553aa8b608d84d1673190ea634c5e84c86d9f0b 100644 (file)
@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
        struct buffer_head              *bh;
        int                             err;
 
-       bh = sb_getblk(inode->i_sb, pblk);
+       bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
 
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                err = -EIO;
                goto cleanup;
        }
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        if (newblock == 0)
                return err;
 
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);
index 41f8e55afcd11491c5f25bf0989f74c12b9baf69..cecf9aa1081134d255455ee329a4e118b8355216 100644 (file)
@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
 {
-       int to_release = 0;
+       int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
+                       contiguous_blks++;
                        clear_buffer_delay(bh);
+               } else if (contiguous_blks) {
+                       lblk = page->index <<
+                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                       lblk += (curr_off >> inode->i_blkbits) -
+                               contiguous_blks;
+                       ext4_es_remove_extent(inode, lblk, contiguous_blks);
+                       contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
 
-       if (to_release) {
+       if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               ext4_es_remove_extent(inode, lblk, to_release);
+               lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+               ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
 
        /* If we have released all the blocks belonging to a cluster, then we
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
        int inode_size = EXT4_INODE_SIZE(sb);
 
        oi.orig_ino = orig_ino;
-       ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+       /*
+        * Calculate the first inode in the inode table block.  Inode
+        * numbers are one-based.  That is, the first inode in a block
+        * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+        */
+       ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
        for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
                if (ino == orig_ino)
                        continue;
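
The off-by-one fixed above is easy to check in isolation: inode numbers are one-based, so for the last inode of a block the old expression jumps to the start of the *next* block. A tiny standalone computation, assuming 16 inodes per block as in the comment:

#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_block = 16;
	unsigned long orig_ino;

	for (orig_ino = 15; orig_ino <= 17; orig_ino++) {
		unsigned long old_start =
			(orig_ino & ~(inodes_per_block - 1)) + 1;
		unsigned long new_start =
			((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;

		printf("ino %2lu: old start %2lu, new start %2lu\n",
		       orig_ino, old_start, new_start);
	}
	/*
	 * For inode 16, the last inode of the first block, the old formula
	 * yields 17 (the next block); the corrected formula yields 1.
	 */
	return 0;
}
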
index cb8451246b30ae72c9787fe0e7326ac9c77b7b29..1346cfa355d0f167837a68d08325fa3529ebbc31 100644 (file)
@@ -755,7 +755,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return err;
        }
        case EXT4_IOC_MOVE_EXT:
-       case FITRIM:
        case EXT4_IOC_RESIZE_FS:
        case EXT4_IOC_PRECACHE_EXTENTS:
        case EXT4_IOC_SET_ENCRYPTION_POLICY:
index f6aedf88da437ee324c314bb1020baa51db0423c..34b610ea503053674b3b87ffd3503d6c641c573d 100644 (file)
@@ -4816,18 +4816,12 @@ do_more:
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-       retry:
-               new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       /*
-                        * We use a retry loop because
-                        * ext4_free_blocks() is not allowed to fail.
-                        */
-                       cond_resched();
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
-                       goto retry;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->efd_start_cluster = bit;
                new_entry->efd_group = block_group;
                new_entry->efd_count = count_clusters;
index b52374e4210221bc02c9cb3ad23f8027f21e77ad..6163ad21cb0ef6b184bdfceb700c1dfd89317590 100644 (file)
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
+       ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
                                       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
                return -EOPNOTSUPP;
 
+       /*
+        * In order to get correct extent info, force all delayed allocation
+        * blocks to be allocated; otherwise the delayed blocks are not
+        * reflected in the extent tree and bypass the checks on the extent
+        * header.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               ext4_alloc_da_blocks(inode);
+
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
                goto errout;
        }
        if (eh->eh_entries == 0)
-               blk = len = 0;
+               blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
-               if (len > EXT4_NDIR_BLOCKS) {
+               start = le32_to_cpu(ex->ee_block);
+               end = start + len - 1;
+               if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 
        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
-       for (i=0; i < len; i++)
+       for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ext4_mark_inode_dirty(handle, inode);
 errout:
index f005046e1591eaa343a4eba0e34b1bbc1e31b4f8..d6a4b55d2ab0512aa34221564f7cff421be0e152 100644 (file)
@@ -484,3 +484,98 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
        a->btree.first_free = cpu_to_le16(8);
        return a;
 }
+
+static unsigned find_run(__le32 *bmp, unsigned *idx)
+{
+       unsigned len;
+       while (tstbits(bmp, *idx, 1)) {
+               (*idx)++;
+               if (unlikely(*idx >= 0x4000))
+                       return 0;
+       }
+       len = 1;
+       while (!tstbits(bmp, *idx + len, 1))
+               len++;
+       return len;
+}
+
+static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
+{
+       int err;
+       secno end;
+       if (fatal_signal_pending(current))
+               return -EINTR;
+       end = start + len;
+       if (start < limit_start)
+               start = limit_start;
+       if (end > limit_end)
+               end = limit_end;
+       if (start >= end)
+               return 0;
+       if (end - start < minlen)
+               return 0;
+       err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
+       if (err)
+               return err;
+       *result += end - start;
+       return 0;
+}
+
+int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
+{
+       int err = 0;
+       struct hpfs_sb_info *sbi = hpfs_sb(s);
+       unsigned idx, len, start_bmp, end_bmp;
+       __le32 *bmp;
+       struct quad_buffer_head qbh;
+
+       *result = 0;
+       if (!end || end > sbi->sb_fs_size)
+               end = sbi->sb_fs_size;
+       if (start >= sbi->sb_fs_size)
+               return 0;
+       if (minlen > 0x4000)
+               return 0;
+       if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
+               hpfs_lock(s);
+               if (s->s_flags & MS_RDONLY) {
+                       err = -EROFS;
+                       goto unlock_1;
+               }
+               if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
+                       err = -EIO;
+                       goto unlock_1;
+               }
+               idx = 0;
+               while ((len = find_run(bmp, &idx)) && !err) {
+                       err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
+                       idx += len;
+               }
+               hpfs_brelse4(&qbh);
+unlock_1:
+               hpfs_unlock(s);
+       }
+       start_bmp = start >> 14;
+       end_bmp = (end + 0x3fff) >> 14;
+       while (start_bmp < end_bmp && !err) {
+               hpfs_lock(s);
+               if (s->s_flags & MS_RDONLY) {
+                       err = -EROFS;
+                       goto unlock_2;
+               }
+               if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
+                       err = -EIO;
+                       goto unlock_2;
+               }
+               idx = 0;
+               while ((len = find_run(bmp, &idx)) && !err) {
+                       err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
+                       idx += len;
+               }
+               hpfs_brelse4(&qbh);
+unlock_2:
+               hpfs_unlock(s);
+               start_bmp++;
+       }
+       return err;
+}
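
find_run() above scans an allocation bitmap in which a set bit marks a free sector: it skips allocated sectors, then measures the length of the free run that do_trim() will discard. A standalone analogue over a toy 64-bit bitmap (names and sizes made up):

#include <stdint.h>
#include <stdio.h>

#define NBITS 64

/* Is bit 'b' set? A set bit marks a free block in this toy bitmap. */
static int bit_set(const uint32_t *bmp, unsigned b)
{
	if (b >= NBITS)
		return 0;
	return (bmp[b >> 5] >> (b & 0x1f)) & 1;
}

/* Advance *idx to the next run of set bits; return its length (0 = none). */
static unsigned find_run(const uint32_t *bmp, unsigned *idx)
{
	unsigned len;

	while (!bit_set(bmp, *idx)) {
		(*idx)++;
		if (*idx >= NBITS)
			return 0;
	}
	len = 1;
	while (bit_set(bmp, *idx + len))
		len++;
	return len;
}

int main(void)
{
	uint32_t bmp[2] = { 0x00f000f0u, 0x0000000fu };
	unsigned idx = 0, len;

	while ((len = find_run(bmp, &idx))) {
		printf("free run at %u, length %u\n", idx, len);
		idx += len;
	}
	return 0;
}
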
index 2a8e07425de01eab5f8a40b5505268b4cc6e2014..dc540bfcee1d8e7842b6b0f0ca402214f9bf5609 100644 (file)
@@ -327,4 +327,5 @@ const struct file_operations hpfs_dir_ops =
        .iterate        = hpfs_readdir,
        .release        = hpfs_dir_release,
        .fsync          = hpfs_file_fsync,
+       .unlocked_ioctl = hpfs_ioctl,
 };
index 6d8cfe9b52d611b7dadbfa1a38ec7cf3c3b3671c..7ca28d604bf7679b3d112a227c196da04f1734e2 100644 (file)
@@ -203,6 +203,7 @@ const struct file_operations hpfs_file_ops =
        .release        = hpfs_file_release,
        .fsync          = hpfs_file_fsync,
        .splice_read    = generic_file_splice_read,
+       .unlocked_ioctl = hpfs_ioctl,
 };
 
 const struct inode_operations hpfs_file_iops =
index bb04b58d1d698486ec31c4fd3e91dab567dbb517..c4867b5116ddf72b3617c8946c7d6b27dd03cd87 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
 #include <asm/unaligned.h>
 
 #include "hpfs.h"
@@ -200,6 +202,7 @@ void hpfs_free_dnode(struct super_block *, secno);
 struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
 struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
 struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
+int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *);
 
 /* anode.c */
 
@@ -318,6 +321,7 @@ __printf(2, 3)
 void hpfs_error(struct super_block *, const char *, ...);
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
 unsigned hpfs_get_free_dnodes(struct super_block *);
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 
 /*
  * local time (HPFS) to GMT (Unix)
index 7cd00d3a7c9b742a86d9fdb16a36b805952b2cf9..68a9bed056284c5a4304c10d607f78f1d4638538 100644 (file)
@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
 }
 
 /* Filesystem error... */
-static char err_buf[1024];
-
 void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
+       struct va_format vaf;
        va_list args;
 
        va_start(args, fmt);
-       vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       pr_err("filesystem error: %pV", &vaf);
+
        va_end(args);
 
-       pr_err("filesystem error: %s", err_buf);
        if (!hpfs_sb(s)->sb_was_error) {
                if (hpfs_sb(s)->sb_err == 2) {
                        pr_cont("; crashing the system because you wanted it\n");
@@ -196,12 +199,39 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
+
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+       switch (cmd) {
+               case FITRIM: {
+                       struct fstrim_range range;
+                       secno n_trimmed;
+                       int r;
+                       if (!capable(CAP_SYS_ADMIN))
+                               return -EPERM;
+                       if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
+                               return -EFAULT;
+                       r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed);
+                       if (r)
+                               return r;
+                       range.len = (u64)n_trimmed << 9;
+                       if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
+                               return -EFAULT;
+                       return 0;
+               }
+               default: {
+                       return -ENOIOCTLCMD;
+               }
+       }
+}
+
+
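
With FITRIM now wired up here (and routed through the generic compat table above rather than per-filesystem compat handlers), the ioctl is driven from userspace the same way as on ext4 or xfs. A minimal caller; the default mount point is a placeholder. On return, range.len holds the number of bytes trimmed:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	/* "/mnt" is a made-up default; pass a real mount point instead */
	int fd = open(argc > 1 ? argv[1] : "/mnt", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;	/* trim the whole filesystem */

	if (ioctl(fd, FITRIM, &range) != 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
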
 static struct kmem_cache * hpfs_inode_cachep;
 
 static struct inode *hpfs_alloc_inode(struct super_block *sb)
 {
        struct hpfs_inode_info *ei;
-       ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
+       ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
        ei->vfs_inode.i_version = 1;
@@ -424,11 +454,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        int o;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        char *new_opts = kstrdup(data, GFP_KERNEL);
-       
+
+       if (!new_opts)
+               return -ENOMEM;
+
        sync_filesystem(s);
 
        *flags |= MS_NOATIME;
-       
+
        hpfs_lock(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
        umask = 0777 & ~sbi->sb_mode;
index 93a1232894f60a07abd8c6b1cdbbb6239270ed27..8db8b7d61e4048cf97ac289f263bc78c8685294d 100644 (file)
@@ -180,9 +180,6 @@ long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case JFS_IOC_SETFLAGS32:
                cmd = JFS_IOC_SETFLAGS;
                break;
-       case FITRIM:
-               cmd = FITRIM;
-               break;
        }
        return jfs_ioctl(filp, cmd, arg);
 }
index 9a20e513d7eb89ee21228e6af0a723996c4150a5..aba43811d6ef1221c68b275afda0174f072343bf 100644 (file)
@@ -1369,7 +1369,6 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case NILFS_IOCTL_SYNC:
        case NILFS_IOCTL_RESIZE:
        case NILFS_IOCTL_SET_ALLOC_RANGE:
-       case FITRIM:
                break;
        default:
                return -ENOIOCTLCMD;
index 53e6c40ed4c6a451bc38d0727176c0827e564930..3cb097ccce607a8c0e6ebb1679a80542182f58b5 100644 (file)
@@ -980,7 +980,6 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case OCFS2_IOC_GROUP_EXTEND:
        case OCFS2_IOC_GROUP_ADD:
        case OCFS2_IOC_GROUP_ADD64:
-       case FITRIM:
                break;
        case OCFS2_IOC_REFLINK:
                if (copy_from_user(&args, argp, sizeof(args)))
index f140e3dbfb7bdeb967cad4639c9e32c236a3cced..d9da5a4e93821ddbee9f2fb449666c207417e523 100644 (file)
@@ -343,6 +343,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
        struct path realpath;
        enum ovl_path_type type;
 
+       if (d_is_dir(dentry))
+               return d_backing_inode(dentry);
+
        type = ovl_path_real(dentry, &realpath);
        if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
                err = ovl_want_write(dentry);
index c471dfc93b716e162815709a25e5a705c4b45159..d2445fa9999f9638d99861c1b03b4d0ca3e47c57 100644 (file)
@@ -58,6 +58,19 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
        acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev)               acpi_device_handle(ACPI_COMPANION(dev))
 
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls: the class, subclass, prog-if triple for this device
+ * @_msk: the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields are left at their
+ * default (zero) values.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk)  .cls = (_cls), .cls_msk = (_msk),
+
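
A kernel-style fragment (not a standalone program) showing how a driver's match table might use the new macro; the class value mirrors how platform AHCI drivers match by PCI class, and the table name is made up:

/* Hypothetical match table: bind by PCI class rather than by _HID. */
static const struct acpi_device_id ahci_acpi_match[] = {
	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
	{ }
};
MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
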
 static inline bool has_acpi_companion(struct device *dev)
 {
        return is_acpi_node(dev->fwnode);
@@ -309,9 +322,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 
 int acpi_resources_are_enforced(void);
 
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-                       unsigned long flags, char *desc);
-
 #ifdef CONFIG_HIBERNATION
 void __init acpi_no_s4_hw_signature(void);
 #endif
@@ -446,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
 #define ACPI_COMPANION(dev)            (NULL)
 #define ACPI_COMPANION_SET(dev, adev)  do { } while (0)
 #define ACPI_HANDLE(dev)               (NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk)  .cls = (0), .cls_msk = (0),
 
 struct fwnode_handle;
 
@@ -507,13 +518,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
        return 0;
 }
 
-static inline int acpi_reserve_region(u64 start, unsigned int length,
-                                     u8 space_id, unsigned long flags,
-                                     char *desc)
-{
-       return -ENXIO;
-}
-
 struct acpi_table_header;
 static inline int acpi_table_parse(char *id,
                                int (*handler)(struct acpi_table_header *))
index 73b45225a7ca1bbe749ea3b62e38a35add22936b..e6797ded700ec8f7ef6a8fccccddf71212715077 100644 (file)
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 }
 
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+       return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
 static inline struct buffer_head *
 sb_find_get_block(struct super_block *sb, sector_t block)
 {
index b6a52a4b457aaaff2db3f55e2fd3bb78d07a6ab1..51bb6532785c3b9918698c3761d5a6e5088e176c 100644 (file)
 /**
  * struct can_skb_priv - private additional data inside CAN sk_buffs
  * @ifindex:   ifindex of the first interface the CAN frame appeared on
+ * @skbcnt:    atomic counter used to form a unique id together with the skb pointer
  * @cf:                align to the following CAN frame at skb->data
  */
 struct can_skb_priv {
        int ifindex;
+       int skbcnt;
        struct can_frame cf[0];
 };
 
index e15499422fdcc7686942ce88389d686d9078cd79..37753278987ac654a5e00797e6d6b81e4b2353c0 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/radix-tree.h>
 #include <linux/uio.h>
 #include <linux/workqueue.h>
+#include <net/net_namespace.h>
 
 #include <linux/ceph/types.h>
 #include <linux/ceph/buffer.h>
@@ -56,6 +57,7 @@ struct ceph_messenger {
        struct ceph_entity_addr my_enc_addr;
 
        atomic_t stopping;
+       possible_net_t net;
        bool nocrc;
        bool tcp_nodelay;
 
@@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
                        u64 required_features,
                        bool nocrc,
                        bool tcp_nodelay);
+extern void ceph_messenger_fini(struct ceph_messenger *msgr);
 
 extern void ceph_con_init(struct ceph_connection *con, void *private,
                        const struct ceph_connection_operations *ops,
index 7f8ad9593da725438aaefb397b99d9fea4109f90..e08a6ae7c0a422fa72fe0b05d47faf14e651e5fb 100644 (file)
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu      __attribute__((noderef, address_space(3)))
+# define __pmem                __attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu         __attribute__((noderef, address_space(4)))
 #else
 # define __rcu
-# define __pmem                __attribute__((noderef, address_space(5)))
 #endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
index 624a668e61f1a6c5096e2afa78bd403655c11e59..fcea4e48e21f5d7060b2451673abf9a8f47d0001 100644 (file)
@@ -87,7 +87,12 @@ struct irq_desc {
        const char              *name;
 } ____cacheline_internodealigned_in_smp;
 
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
index 8183d6640ca7ac24ffff5f011f8267c888f251f6..34f25b7bf6421789dc7393f6c3dbc5f4d8877c77 100644 (file)
@@ -189,6 +189,8 @@ struct css_device_id {
 struct acpi_device_id {
        __u8 id[ACPI_ID_LEN];
        kernel_ulong_t driver_data;
+       __u32 cls;
+       __u32 cls_msk;
 };
 
 #define PNP_ID_LEN     8
index 2c92e1c8e0553cbd71b16c2dea228f75b56bcd5b..aefd997262e4dabdabe2d6c88723455180e01f56 100644 (file)
@@ -9,10 +9,14 @@
 #ifndef _SIRFSOC_RTC_IOBRG_H_
 #define _SIRFSOC_RTC_IOBRG_H_
 
+struct regmap_config;
+
 extern void sirfsoc_rtc_iobrg_besyncing(void);
 
 extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
 
 extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+                                   const struct regmap_config *config);
 
 #endif
index 3741ba1a652c9716724bbef2ae1e9c12bc82aa8a..edbfc9a5293ee48f981952eb709f0ed13e6b07c8 100644 (file)
@@ -67,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
 static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
 #endif /* BROADCAST */
 
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
 #else
-static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+       return 0;
+}
 #endif
 
 static inline void tick_broadcast_enable(void)
index 3aa72e64865021ef0efd15c0511fee55a520c19e..6e191e4e6ab637d8924528c61dc551711d085df2 100644 (file)
@@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts)
 }
 #endif
 
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 #define ktime_get_real_ts64(ts)        getnstimeofday64(ts)
 
 /*
index 7c9b484735c533ed4243be050d8ad2e8c67d581a..1f6526c76ee84f4068707bd0ecae16aaa25c0759 100644 (file)
@@ -80,6 +80,9 @@
 #define CDC_NCM_TIMER_INTERVAL_MIN             5UL
 #define CDC_NCM_TIMER_INTERVAL_MAX             (U32_MAX / NSEC_PER_USEC)
 
+/* Driver flags */
+#define CDC_NCM_FLAG_NDP_TO_END        0x02            /* NDP is placed at end of frame */
+
 #define cdc_ncm_comm_intf_is_mbim(x)  ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
                                       (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
 #define cdc_ncm_data_intf_is_mbim(x)  ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
 
        spinlock_t mtx;
        atomic_t stop;
+       int drvflags;
 
        u32 timer_interval;
        u32 max_ndp_size;
+       struct usb_cdc_ncm_ndp16 *delayed_ndp16;
 
        u32 tx_timer_pending;
        u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
 };
 
 u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
 int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
index 669a1f0b1d976d2f6045b539f18e1fccbadc6809..23cbd34e4ac738de5c1d587be1028b5bca01ef86 100644 (file)
@@ -15,6 +15,7 @@ enum {
        NETCONFA_RP_FILTER,
        NETCONFA_MC_FORWARDING,
        NETCONFA_PROXY_NEIGH,
+       NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
        __NETCONFA_MAX
 };
 #define NETCONFA_MAX   (__NETCONFA_MAX - 1)
index 09c65640cad6156f0ea702bc32ba9847b9465249..e85bdfd15fedd4c8fed46818edae6191673bff07 100644 (file)
@@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
         * for strings that are too long, we should not have created
         * any.
         */
-       if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {
-               WARN_ON(1);
+       if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
                send_sig(SIGKILL, current, 0);
                return -1;
        }
index 9c9c9fab16cc3610afa76a6c467780482b35b0be..6a374544d495f1ff7b7f5bfd03fd798e30144a7e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        smpboot_park_threads(cpu);
 
        /*
-        * So now all preempt/rcu users must observe !cpu_active().
+        * Prevent irq alloc/free while the dying cpu reorganizes the
+        * interrupt affinities.
         */
+       irq_lock_sparse();
 
+       /*
+        * So now all preempt/rcu users must observe !cpu_active().
+        */
        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+               irq_unlock_sparse();
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
        per_cpu(cpu_dead_idle, cpu) = false;
 
+       /* Interrupts are moved away from the dying cpu, reenable alloc/free */
+       irq_unlock_sparse();
+
        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);
@@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
                goto out_notify;
        }
 
+       /*
+        * Some architectures have to walk the irq descriptors to
+        * setup the vector space for the cpu which comes online.
+        * Prevent irq alloc/free across the bringup.
+        */
+       irq_lock_sparse();
+
        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
+
+       irq_unlock_sparse();
+
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));
index e965cfae420725645349c4facd145828e4e59d13..d3dae3419b99566c127f1682b29f39bb184bbdb1 100644 (file)
@@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
        rcu_read_unlock();
 }
 
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-       struct ring_buffer *rb;
-
-       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-       rb_free(rb);
-}
-
 struct ring_buffer *ring_buffer_get(struct perf_event *event)
 {
        struct ring_buffer *rb;
index 2deb24c7a40dd979313eb1fcff58044d2d948888..2bbad9c1274c3199338e653bbb5c8bd640815fa7 100644 (file)
@@ -11,6 +11,7 @@
 struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
+       struct irq_work                 irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
@@ -55,6 +56,15 @@ struct ring_buffer {
 };
 
 extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+       struct ring_buffer *rb;
+
+       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+       rb_free(rb);
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
index 96472824a752f76fe651ec1bfb7ab7a52411a12c..b2be01b1aa9dcb7a70792fa381c264b229a106d0 100644 (file)
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
+       init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+       if (!atomic_dec_and_test(&rb->refcount))
+               return;
+
+       rb->rcu_head.next = (void *)rb;
+       irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ err_put:
        rb_free_aux(rb);
 
 err:
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
        handle->event = NULL;
 
        return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
        local_set(&rb->aux_nest, 0);
        rb_free_aux(rb);
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
 }
 
 /*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
+               irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+       struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+       if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);
+
+       if (rb->rcu_head.next == (void *)rb)
+               call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index 4834ee828c41e8ee9dd8e8e4fb15e03e66c037e1..61008b8433ab78f5364ac2512d956fd554614690 100644 (file)
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
index 3e0e19763d246a998acb109c6979e4df593e3958..4d2b82e610e2a48f429a700f0529d6dc5942700a 100644 (file)
@@ -3557,6 +3557,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       mod_tree_remove(mod);
        wake_up_all(&module_wq);
        /* Wait for RCU-sched synchronizing before releasing mod->list. */
        synchronize_sched();
index 08ccc3da3ca086366eadaae89f46e0604099d661..50eb107f119877ab0762bb671a69cf9c0cd418bc 100644 (file)
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
                /* The clockevent device is getting replaced. Shut it down. */
 
        case CLOCK_EVT_STATE_SHUTDOWN:
-               return dev->set_state_shutdown(dev);
+               if (dev->set_state_shutdown)
+                       return dev->set_state_shutdown(dev);
+               return 0;
 
        case CLOCK_EVT_STATE_PERIODIC:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
                        return -ENOSYS;
-               return dev->set_state_periodic(dev);
+               if (dev->set_state_periodic)
+                       return dev->set_state_periodic(dev);
+               return 0;
 
        case CLOCK_EVT_STATE_ONESHOT:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return -ENOSYS;
-               return dev->set_state_oneshot(dev);
+               if (dev->set_state_oneshot)
+                       return dev->set_state_oneshot(dev);
+               return 0;
 
        case CLOCK_EVT_STATE_ONESHOT_STOPPED:
                /* Core internal bug */
@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;
 
-       /* New state-specific callbacks */
-       if (!dev->set_state_shutdown)
-               return -EINVAL;
-
-       if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
-           !dev->set_state_periodic)
-               return -EINVAL;
-
-       if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
-           !dev->set_state_oneshot)
-               return -EINVAL;
-
        return 0;
 }
 
index d39f32cdd1b59cca97066430442e3aa054cc9eed..52b9e199b5acc1e292d7e5a030685f6dbe7fdfe2 100644 (file)
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
        struct clock_event_device *bc = tick_broadcast_device.evtdev;
        unsigned long flags;
-       int ret;
+       int ret = 0;
 
        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
                         * If we kept the cpu in the broadcast mask,
                         * tell the caller to leave the per cpu device
                         * in shutdown state. The periodic interrupt
-                        * is delivered by the broadcast device.
+                        * is delivered by the broadcast device, if
+                        * the broadcast device exists and is not
+                        * hrtimer based.
                         */
-                       ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+                       if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+                               ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
                        break;
                default:
-                       /* Nothing to do */
-                       ret = 0;
                        break;
                }
        }
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
         * Check, if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
+               struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
                cpumask_clear_cpu(cpu, mask);
-               local = true;
+               /*
+                * We only run the local handler if the broadcast
+                * device is not hrtimer based; otherwise we run into
+                * hrtimer recursion:
+                *
+                * local timer_interrupt()
+                *   local_handler()
+                *     expire_hrtimers()
+                *       bc_handler()
+                *         local_handler()
+                *           expire_hrtimers()
+                */
+               local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
        }
 
        if (!cpumask_empty(mask)) {
@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
        bool bc_local;
 
        raw_spin_lock(&tick_broadcast_lock);
+
+       /* Handle spurious interrupts gracefully */
+       if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
+               raw_spin_unlock(&tick_broadcast_lock);
+               return;
+       }
+
        bc_local = tick_do_periodic_broadcast();
 
        if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
        case TICK_BROADCAST_ON:
                cpumask_set_cpu(cpu, tick_broadcast_on);
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
-                       if (tick_broadcast_device.mode ==
-                           TICKDEV_MODE_PERIODIC)
+                       /*
+                        * Only shut down the cpu local device if:
+                        *
+                        * - the broadcast device exists
+                        * - the broadcast device is not a hrtimer based one
+                        * - the broadcast device is in periodic mode to
+                        *   avoid a hiccup during the switch to oneshot mode
+                        */
+                       if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
+                           tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                break;
@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
                break;
        }
 
-       if (cpumask_empty(tick_broadcast_mask)) {
-               if (!bc_stopped)
-                       clockevents_shutdown(bc);
-       } else if (bc_stopped) {
-               if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-                       tick_broadcast_start_periodic(bc);
-               else
-                       tick_broadcast_setup_oneshot(bc);
+       if (bc) {
+               if (cpumask_empty(tick_broadcast_mask)) {
+                       if (!bc_stopped)
+                               clockevents_shutdown(bc);
+               } else if (bc_stopped) {
+                       if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+                               tick_broadcast_start_periodic(bc);
+                       else
+                               tick_broadcast_setup_oneshot(bc);
+               }
        }
        raw_spin_unlock(&tick_broadcast_lock);
 }
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-/**
- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
- * @state:     The target state (enter/exit)
- *
- * The system enters/leaves a state, where affected devices might stop
- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
- */
-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 {
        struct clock_event_device *bc, *dev;
-       struct tick_device *td;
        int cpu, ret = 0;
        ktime_t now;
 
        /*
-        * Periodic mode does not care about the enter/exit of power
-        * states
+        * If there is no broadcast device, tell the caller not to go
+        * into deep idle.
         */
-       if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-               return 0;
+       if (!tick_broadcast_device.evtdev)
+               return -EBUSY;
 
-       /*
-        * We are called with preemtion disabled from the depth of the
-        * idle code, so we can't be moved away.
-        */
-       td = this_cpu_ptr(&tick_cpu_device);
-       dev = td->evtdev;
-
-       if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-               return 0;
+       dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
 
        raw_spin_lock(&tick_broadcast_lock);
        bc = tick_broadcast_device.evtdev;
        cpu = smp_processor_id();
 
        if (state == TICK_BROADCAST_ENTER) {
+               /*
+                * If the current CPU owns the hrtimer broadcast
+                * mechanism, it cannot go deep idle and we do not add
+                * the CPU to the broadcast mask. We don't have to go
+                * through the EXIT path as the local timer is not
+                * shutdown.
+                */
+               ret = broadcast_needs_cpu(bc, cpu);
+               if (ret)
+                       goto out;
+
+               /*
+                * If the broadcast device is in periodic mode, we
+                * return.
+                */
+               if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
+                       /* If it is a hrtimer based broadcast, return busy */
+                       if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
+                               ret = -EBUSY;
+                       goto out;
+               }
+
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+
+                       /* Conditionally shut down the local timer. */
                        broadcast_shutdown_local(bc, dev);
+
                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourself in the force mask and
                         * if the cpu local event is earlier than the
                         * broadcast event. If the current CPU is in
                         * the force mask, then we are going to be
-                        * woken by the IPI right away.
+                        * woken by the IPI right away; we return
+                        * busy, so the CPU does not try to go deep
+                        * idle.
                         */
-                       if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
-                           dev->next_event.tv64 < bc->next_event.tv64)
+                       if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
+                               ret = -EBUSY;
+                       } else if (dev->next_event.tv64 < bc->next_event.tv64) {
                                tick_broadcast_set_event(bc, cpu, dev->next_event);
+                               /*
+                                * In case of hrtimer broadcasts the
+                                * programming might have moved the
+                                * timer to this cpu. If yes, remove
+                                * us from the broadcast mask and
+                                * return busy.
+                                */
+                               ret = broadcast_needs_cpu(bc, cpu);
+                               if (ret) {
+                                       cpumask_clear_cpu(cpu,
+                                               tick_broadcast_oneshot_mask);
+                               }
+                       }
                }
-               /*
-                * If the current CPU owns the hrtimer broadcast
-                * mechanism, it cannot go deep idle and we remove the
-                * CPU from the broadcast mask. We don't have to go
-                * through the EXIT path as the local timer is not
-                * shutdown.
-                */
-               ret = broadcast_needs_cpu(bc, cpu);
-               if (ret)
-                       cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -938,6 +981,16 @@ bool tick_broadcast_oneshot_available(void)
        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
 }
 
+#else
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+       if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
+               return -EBUSY;
+
+       return 0;
+}
 #endif
 
 void __init tick_broadcast_init(void)
index 76446cb5dfe1a5280323f41c16299392e99efb93..55e13efff1ab0d7431b81ea951a66feec714728f 100644 (file)
@@ -343,6 +343,27 @@ out_bc:
        tick_install_broadcast_device(newdev);
 }
 
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state:     The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+       struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+       if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+               return 0;
+
+       return __tick_broadcast_oneshot_control(state);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Transfer the do_timer job away from a dying cpu.
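
The new wrapper keeps the fast path cheap: if the CPU-local clock event device does not stop in deep C-states (no CLOCK_EVT_FEAT_C3STOP), broadcast accounting is skipped entirely; only otherwise does it fall through to __tick_broadcast_oneshot_control(), whose -EBUSY now tells the idle code to stay shallow. A hedged sketch of how an idle-state driver consumes that contract through the tick_broadcast_enter()/tick_broadcast_exit() helpers; deep_idle() and shallow_idle() are placeholders, not in-tree functions:

static void enter_idle(void)
{
	if (tick_broadcast_enter()) {
		/*
		 * -EBUSY: this CPU must keep its tick, e.g. because it
		 * drives the hrtimer based broadcast. Pick a state in
		 * which the local timer keeps running.
		 */
		shallow_idle();
		return;
	}
	deep_idle();			/* local timer may stop here */
	tick_broadcast_exit();		/* leave broadcast mode again */
}
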
index 42fdf4958bccd1c5d1593f4ebe578ea99477ddbf..a4a8d4e9baa13f37cbdceff5ffedc3bfc5fd8e4c 100644 (file)
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
 static inline void tick_cancel_sched_timer(int cpu) { }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int
+__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+       return -EBUSY;
+}
+#endif
+
 #endif
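
The header pairs the real prototype with a static-inline stub so callers compile unchanged when CONFIG_GENERIC_CLOCKEVENTS_BROADCAST is off, and the stub's -EBUSY preserves the "do not go deep" contract. A standalone model of that compile-time fallback idiom (HAVE_BROADCAST is an illustrative macro, not a kernel config symbol):

#include <errno.h>
#include <stdio.h>

#ifdef HAVE_BROADCAST
int broadcast_control(int enter);	/* real implementation elsewhere */
#else
static inline int broadcast_control(int enter)
{
	(void)enter;
	return -EBUSY;		/* no broadcast device: stay shallow */
}
#endif

int main(void)
{
	printf("%d\n", broadcast_control(1));	/* -16 without HAVE_BROADCAST */
	return 0;
}
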
index 777eda7d1ab4bff70b8aa465e336465239c9f1b9..39f24d6721e5a29d8016811d4f5332fca66c2ec2 100644 (file)
@@ -18,10 +18,6 @@ config KASAN
          For better error detection enable CONFIG_STACKTRACE,
          and add slub_debug=U to boot cmdline.
 
-config KASAN_SHADOW_OFFSET
-       hex
-       default 0xdffffc0000000000 if X86_64
-
 choice
        prompt "Instrumentation type"
        depends on KASAN
index a60a6d335a91a6aa90f019f77062e7be069939fd..cc0c69710dcf8a2c7474b759be72e8fa22c2ca1f 100644 (file)
@@ -610,6 +610,8 @@ next:
                iter->skip = 0;
        }
 
+       iter->p = NULL;
+
        /* Ensure we see any new tables. */
        smp_rmb();
 
@@ -620,8 +622,6 @@ next:
                return ERR_PTR(-EAGAIN);
        }
 
-       iter->p = NULL;
-
        return NULL;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
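
Moving iter->p = NULL ahead of the resize check means the iterator is reset even when rhashtable_walk_next() bails out with -EAGAIN, so a caller that resumes the walk cannot dereference a table entry freed by the resize. A hedged caller-side sketch; my_table, struct obj_t and process() are placeholders, and error handling for walk_init/walk_start is omitted:

struct rhashtable_iter iter;
struct obj_t *obj;

rhashtable_walk_init(&my_table, &iter);
rhashtable_walk_start(&iter);
while ((obj = rhashtable_walk_next(&iter)) != NULL) {
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -EAGAIN)
			continue;	/* table resized; walk resumes safely */
		break;			/* genuine error */
	}
	process(obj);
}
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
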
index a84fbb772034f2e73eac300e254bd54f2a36ce03..388dcf9aa283c83ee78dcf22503de253e812a27d 100644 (file)
@@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        pte_unmap(page_table);
 
+       /* File mapping without ->vm_ops ? */
+       if (vma->vm_flags & VM_SHARED)
+               return VM_FAULT_SIGBUS;
+
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
                return VM_FAULT_SIGSEGV;
@@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
        pte_unmap(page_table);
+       /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+       if (!vma->vm_ops->fault)
+               return VM_FAULT_SIGBUS;
        if (!(flags & FAULT_FLAG_WRITE))
                return do_read_fault(mm, vma, address, pmd, pgoff, flags,
                                orig_pte);
@@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm,
        barrier();
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
-                       if (vma->vm_ops) {
-                               if (likely(vma->vm_ops->fault))
-                                       return do_fault(mm, vma, address, pte,
-                                                       pmd, flags, entry);
-                       }
-                       return do_anonymous_page(mm, vma, address,
-                                                pte, pmd, flags);
+                       if (vma->vm_ops)
+                               return do_fault(mm, vma, address, pte, pmd,
+                                               flags, entry);
+
+                       return do_anonymous_page(mm, vma, address, pte, pmd,
+                                       flags);
                }
                return do_swap_page(mm, vma, address,
                                        pte, pmd, flags, entry);
index e97572b5d2ccfbce420009f60b30f5439fd1c571..0ff6e1bbca910a35fc94794d39ac8978085b144d 100644 (file)
@@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
        } else {
                skb_push(skb, ETH_HLEN);
                br_drop_fake_rtable(skb);
+               skb_sender_cpu_clear(skb);
                dev_queue_xmit(skb);
        }
 
index e29ad70b3000be4f0e7486b172746ea62b4f216f..c11cf2611db0c870542969b6847d0a61d18b64d4 100644 (file)
@@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_mdb_htable *mdb;
+       unsigned long now = jiffies;
        int err;
 
        mdb = mlock_dereference(br->mdb, br);
@@ -347,6 +348,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
+       if (state == MDB_TEMPORARY)
+               mod_timer(&p->timer, now + br->multicast_membership_interval);
 
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
        return 0;
@@ -371,6 +374,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;
 
+       memset(&ip, 0, sizeof(ip));
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
@@ -417,20 +421,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
        if (!netif_running(br->dev) || br->multicast_disabled)
                return -EINVAL;
 
+       memset(&ip, 0, sizeof(ip));
        ip.proto = entry->addr.proto;
-       if (ip.proto == htons(ETH_P_IP)) {
-               if (timer_pending(&br->ip4_other_query.timer))
-                       return -EBUSY;
-
+       if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-       } else {
-               if (timer_pending(&br->ip6_other_query.timer))
-                       return -EBUSY;
-
+       else
                ip.u.ip6 = entry->addr.u.ip6;
 #endif
-       }
 
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
index d89f4fac0bc507cb3faea7782d9791d48a552fea..c8b9bcfe997e48556cfc2633981e272e063f6880 100644 (file)
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct brnf_frag_data {
        char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
        u8 encap_size;
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
                             int (*output)(struct sock *, struct sk_buff *))
 {
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
 
        return ip_do_fragment(sk, skb, output);
 }
+#endif
 
 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
                struct brnf_frag_data *data;
 
                if (br_validate_ipv4(skb))
-                       return NF_DROP;
+                       goto drop;
 
                IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
                struct brnf_frag_data *data;
 
                if (br_validate_ipv6(skb))
-                       return NF_DROP;
+                       goto drop;
 
                IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 
                if (v6ops)
                        return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
-               else
-                       return -EMSGSIZE;
+
+               kfree_skb(skb);
+               return -EMSGSIZE;
        }
 #endif
        nf_bridge_info_free(skb);
        return br_dev_queue_push_xmit(sk, skb);
+ drop:
+       kfree_skb(skb);
+       return 0;
 }
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
index 6d12d2675c80920571a5788a2662f08a6cb73345..13b7d1e3d1850e9aa408b55287c8ad72c950081a 100644 (file)
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
 {
        const struct ipv6hdr *hdr;
        struct net_device *dev = skb->dev;
-       struct inet6_dev *idev = in6_dev_get(skb->dev);
+       struct inet6_dev *idev = __in6_dev_get(skb->dev);
        u32 pkt_len;
        u8 ip6h_len = sizeof(struct ipv6hdr);
 
index 6b67ed3831de504c7f2d52e8cf7436c24365f183..364bdc98bd9bef003dfe4f17a1f2ac3048c0bd02 100644 (file)
@@ -457,6 +457,8 @@ static int br_afspec(struct net_bridge *br,
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+                       return -EINVAL;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
                        if (vinfo_start)
                                return -EINVAL;
index 7933e62a7318915327c7f0a9a8ead8beada40a2e..166d436196c14480184859e2bcaadeb7c5514f3a 100644 (file)
@@ -89,6 +89,8 @@ struct timer_list can_stattimer;   /* timer for statistics update */
 struct s_stats    can_stats;       /* packet statistics */
 struct s_pstats   can_pstats;      /* receive list statistics */
 
+static atomic_t skbcounter = ATOMIC_INIT(0);
+
 /*
  * af_can socket functions
  */
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
                return err;
        }
 
-       if (newskb) {
-               if (!(newskb->tstamp.tv64))
-                       __net_timestamp(newskb);
-
+       if (newskb)
                netif_rx_ni(newskb);
-       }
 
        /* update statistics */
        can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
        can_stats.rx_frames++;
        can_stats.rx_frames_delta++;
 
+       /* create non-zero unique skb identifier together with *skb */
+       while (!(can_skb_prv(skb)->skbcnt))
+               can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
+
        rcu_read_lock();
 
        /* deliver the packet to sockets listening on all devices */
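
The receive path now stamps every CAN skb with a non-zero counter value instead of relying on timestamps (which the can_send() hunk above stops forcing), and the senders (see the bcm.c and raw.c hunks further down) initialize skbcnt to 0 so the id is assigned exactly once. The while loop exists because atomic_inc_return() passes through 0 on 32-bit wrap; a standalone C11 model of the same scheme:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint skbcounter;	/* zero-initialized, like the kernel's */

static unsigned int get_skb_id(void)
{
	unsigned int id;

	/* 0 means "no id assigned yet", so skip it when the counter wraps. */
	do {
		id = atomic_fetch_add(&skbcounter, 1) + 1;
	} while (id == 0);

	return id;
}

int main(void)
{
	printf("%u %u\n", get_skb_id(), get_skb_id());	/* prints "1 2" */
	return 0;
}
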
index b523453585be7f56f1e04eb97977b2eb4f90dadb..a1ba6875c2a2073d55b6f797e16d88baed3af3d1 100644 (file)
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
 
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
        }
 
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
        skb->dev = dev;
        can_skb_set_owner(skb, sk);
        err = can_send(skb, 1); /* send with loopback */
index 31b9748cbb4ec6c1caa9ddc9e3a80609077a81eb..2e67b1423cd32d82cbf34742bf9208cb494eaa28 100644 (file)
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
  */
 
 struct uniqframe {
-       ktime_t tstamp;
+       int skbcnt;
        const struct sk_buff *skb;
        unsigned int join_rx_count;
 };
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 
        /* eliminate multiple filter matches for the same skb */
        if (this_cpu_ptr(ro->uniq)->skb == oskb &&
-           ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+           this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
                if (ro->join_filters) {
                        this_cpu_inc(ro->uniq->join_rx_count);
                        /* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
                }
        } else {
                this_cpu_ptr(ro->uniq)->skb = oskb;
-               this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+               this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
                this_cpu_ptr(ro->uniq)->join_rx_count = 1;
                /* drop first frame to check all enabled filters? */
                if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
index cb7db320dd276aa0107d8e484270e3e8fb61dd6a..f30329f726418bdc2e82bb5aced4c3634e215850 100644 (file)
@@ -9,6 +9,7 @@
 #include <keys/ceph-type.h>
 #include <linux/module.h>
 #include <linux/mount.h>
+#include <linux/nsproxy.h>
 #include <linux/parser.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
@@ -16,8 +17,6 @@
 #include <linux/statfs.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
 
 
 #include <linux/ceph/ceph_features.h>
@@ -131,6 +130,13 @@ int ceph_compare_options(struct ceph_options *new_opt,
        int i;
        int ret;
 
+       /*
+        * Don't bother comparing options if network namespaces don't
+        * match.
+        */
+       if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
+               return -1;
+
        ret = memcmp(opt1, opt2, ofs);
        if (ret)
                return ret;
@@ -335,9 +341,6 @@ ceph_parse_options(char *options, const char *dev_name,
        int err = -ENOMEM;
        substring_t argstr[MAX_OPT_ARGS];
 
-       if (current->nsproxy->net_ns != &init_net)
-               return ERR_PTR(-EINVAL);
-
        opt = kzalloc(sizeof(*opt), GFP_KERNEL);
        if (!opt)
                return ERR_PTR(-ENOMEM);
@@ -608,6 +611,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
 fail_monc:
        ceph_monc_stop(&client->monc);
 fail:
+       ceph_messenger_fini(&client->msgr);
        kfree(client);
        return ERR_PTR(err);
 }
@@ -621,8 +625,8 @@ void ceph_destroy_client(struct ceph_client *client)
 
        /* unmount */
        ceph_osdc_stop(&client->osdc);
-
        ceph_monc_stop(&client->monc);
+       ceph_messenger_fini(&client->msgr);
 
        ceph_debugfs_client_cleanup(client);
 
index 1679f47280e2678202238e667c1b617c7665d5fa..e3be1d22a2477dd2d9e271a4594d70ebdb1148e7 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/inet.h>
 #include <linux/kthread.h>
 #include <linux/net.h>
+#include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -479,7 +480,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
        int ret;
 
        BUG_ON(con->sock);
-       ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+       ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret)
                return ret;
@@ -1731,17 +1732,17 @@ static int verify_hello(struct ceph_connection *con)
 
 static bool addr_is_blank(struct sockaddr_storage *ss)
 {
+       struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
+       struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
+
        switch (ss->ss_family) {
        case AF_INET:
-               return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
+               return addr->s_addr == htonl(INADDR_ANY);
        case AF_INET6:
-               return
-                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
-                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
-                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
-                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
+               return ipv6_addr_any(addr6);
+       default:
+               return true;
        }
-       return false;
 }
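
The rewrite replaces open-coded word comparisons with the stock predicates and, via the new default case, treats unknown address families as blank so they are never advertised as a bind address. A runnable userspace equivalent built on the standard socket API; the kernel uses ipv6_addr_any(), for which IN6_IS_ADDR_UNSPECIFIED() is the libc counterpart:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool addr_is_blank(const struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET: {
		const struct sockaddr_in *in = (const struct sockaddr_in *)ss;

		return in->sin_addr.s_addr == htonl(INADDR_ANY);
	}
	case AF_INET6: {
		const struct sockaddr_in6 *in6 = (const struct sockaddr_in6 *)ss;

		return IN6_IS_ADDR_UNSPECIFIED(&in6->sin6_addr);
	}
	default:
		return true;	/* unknown family: treat as blank */
	}
}

int main(void)
{
	struct sockaddr_storage ss;

	memset(&ss, 0, sizeof(ss));
	ss.ss_family = AF_INET;
	printf("%d\n", addr_is_blank(&ss));	/* 1: 0.0.0.0 is blank */
	return 0;
}
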
 
 static int addr_port(struct sockaddr_storage *ss)
@@ -2944,11 +2945,18 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
        msgr->tcp_nodelay = tcp_nodelay;
 
        atomic_set(&msgr->stopping, 0);
+       write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
 
        dout("%s %p\n", __func__, msgr);
 }
 EXPORT_SYMBOL(ceph_messenger_init);
 
+void ceph_messenger_fini(struct ceph_messenger *msgr)
+{
+       put_net(read_pnet(&msgr->net));
+}
+EXPORT_SYMBOL(ceph_messenger_fini);
+
 static void clear_standby(struct ceph_connection *con)
 {
        /* come back from STANDBY? */
index 6778a9999d525307d5bd41a1750a6e96a6e22bf3..a8e4dd4302853702fef7fb1462fbdeb8a38f45e1 100644 (file)
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
                return dev->netdev_ops->ndo_get_iflink(dev);
 
-       /* If dev->rtnl_link_ops is set, it's a virtual interface. */
-       if (dev->rtnl_link_ops)
-               return 0;
-
        return dev->ifindex;
 }
 EXPORT_SYMBOL(dev_get_iflink);
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        local_irq_save(flags);
 
        rps_lock(sd);
+       if (!netif_running(skb->dev))
+               goto drop;
        qlen = skb_queue_len(&sd->input_pkt_queue);
        if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
                if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
                goto enqueue;
        }
 
+drop:
        sd->dropped++;
        rps_unlock(sd);
 
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
        pt_prev = NULL;
 
-       rcu_read_lock();
-
 another_round:
        skb->skb_iif = skb->dev->ifindex;
 
@@ -3786,7 +3783,7 @@ another_round:
            skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
-                       goto unlock;
+                       goto out;
        }
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
        if (static_key_false(&ingress_needed)) {
                skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
                if (!skb)
-                       goto unlock;
+                       goto out;
 
                if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-                       goto unlock;
+                       goto out;
        }
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
                if (vlan_do_receive(&skb))
                        goto another_round;
                else if (unlikely(!skb))
-                       goto unlock;
+                       goto out;
        }
 
        rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
                switch (rx_handler(&skb)) {
                case RX_HANDLER_CONSUMED:
                        ret = NET_RX_SUCCESS;
-                       goto unlock;
+                       goto out;
                case RX_HANDLER_ANOTHER:
                        goto another_round;
                case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
                ret = NET_RX_DROP;
        }
 
-unlock:
-       rcu_read_unlock();
+out:
        return ret;
 }
 
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+       int ret;
+
        net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
+       rcu_read_lock();
+
 #ifdef CONFIG_RPS
        if (static_key_false(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
-               int cpu, ret;
-
-               rcu_read_lock();
-
-               cpu = get_rps_cpu(skb->dev, skb, &rflow);
+               int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
                        return ret;
                }
-               rcu_read_unlock();
        }
 #endif
-       return __netif_receive_skb(skb);
+       ret = __netif_receive_skb(skb);
+       rcu_read_unlock();
+       return ret;
 }
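
Together with the __netif_receive_skb_core and process_backlog hunks, this moves the RCU read-side lock out to the boundary of the receive path: one critical section now covers both the RPS cpu lookup and the full protocol dispatch, which is what allows a later hunk to flush backlogs from rollback_registered_many() without racing against in-flight packets. The shape of the change, illustratively (receive_core() is a placeholder):

static int receive_internal(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	/* Everything below may dereference RCU-protected device state. */
	ret = receive_core(skb);
	rcu_read_unlock();
	return ret;
}
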
 
 /**
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
                struct sk_buff *skb;
 
                while ((skb = __skb_dequeue(&sd->process_queue))) {
+                       rcu_read_lock();
                        local_irq_enable();
                        __netif_receive_skb(skb);
+                       rcu_read_unlock();
                        local_irq_disable();
                        input_queue_head_incr(sd);
                        if (++work >= quota) {
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
                unlist_netdevice(dev);
 
                dev->reg_state = NETREG_UNREGISTERING;
+               on_each_cpu(flush_backlog, dev, 1);
        }
 
        synchronize_net();
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
        struct netdev_queue *tx;
        size_t sz = count * sizeof(*tx);
 
-       BUG_ON(count < 1 || count > 0xffff);
+       if (count < 1 || count > 0xffff)
+               return -EINVAL;
 
        tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!tx) {
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
 
                dev->reg_state = NETREG_UNREGISTERED;
 
-               on_each_cpu(flush_backlog, dev, 1);
-
                netdev_wait_allrefs(dev);
 
                /* paranoia */
index 9dfb88a933e7c25ac5125570c2fd0edf4696c1cb..92d886f4adcbd9feebbf9ae5eb084f8ba5fa946b 100644 (file)
@@ -66,7 +66,7 @@
 
    NOTES.
 
-   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * avbps and avpps are scaled by 2^5.
    * both values are reported as 32 bit unsigned values. bps can
      overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
@@ -85,10 +85,10 @@ struct gen_estimator
        struct gnet_stats_rate_est64    *rate_est;
        spinlock_t              *stats_lock;
        int                     ewma_log;
+       u32                     last_packets;
+       unsigned long           avpps;
        u64                     last_bytes;
        u64                     avbps;
-       u32                     last_packets;
-       u32                     avpps;
        struct rcu_head         e_rcu;
        struct rb_node          node;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
@@ -118,8 +118,8 @@ static void est_timer(unsigned long arg)
        rcu_read_lock();
        list_for_each_entry_rcu(e, &elist[idx].list, list) {
                struct gnet_stats_basic_packed b = {0};
+               unsigned long rate;
                u64 brate;
-               u32 rate;
 
                spin_lock(e->stats_lock);
                read_lock(&est_lock);
@@ -133,10 +133,11 @@ static void est_timer(unsigned long arg)
                e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
                e->rate_est->bps = (e->avbps+0xF)>>5;
 
-               rate = (b.packets - e->last_packets)<<(12 - idx);
+               rate = b.packets - e->last_packets;
+               rate <<= (7 - idx);
                e->last_packets = b.packets;
                e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
-               e->rate_est->pps = (e->avpps+0x1FF)>>10;
+               e->rate_est->pps = (e->avpps + 0xF) >> 5;
 skip:
                read_unlock(&est_lock);
                spin_unlock(e->stats_lock);
index 05badbb588659eaba8db986900f22f104d498dc0..1ebdf1c0d1188c309d854bc9145c9b2f5b7b58a4 100644 (file)
@@ -3571,13 +3571,6 @@ static int pktgen_thread_worker(void *arg)
        pr_debug("%s removing thread\n", t->tsk->comm);
        pktgen_rem_thread(t);
 
-       /* Wait for kthread_stop */
-       while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule();
-       }
-       __set_current_state(TASK_RUNNING);
-
        return 0;
 }
 
@@ -3769,6 +3762,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
        }
 
        t->net = pn;
+       get_task_struct(p);
        wake_up_process(p);
        wait_for_completion(&t->start_done);
 
@@ -3891,6 +3885,7 @@ static void __net_exit pg_net_exit(struct net *net)
                t = list_entry(q, struct pktgen_thread, th_list);
                list_del(&t->th_list);
                kthread_stop(t->tsk);
+               put_task_struct(t->tsk);
                kfree(t);
        }
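
With the "wait for kthread_stop" loop gone from the worker (removed in the hunk above), the thread may exit before pg_net_exit() runs, so the creator must pin the task_struct or kthread_stop() would act on freed memory. A hedged sketch of the reference-counting pattern; worker_fn, data and cpu are placeholders:

struct task_struct *t;

t = kthread_create(worker_fn, data, "worker/%d", cpu);
if (IS_ERR(t))
	return PTR_ERR(t);

get_task_struct(t);	/* ref held across the thread's whole lifetime */
wake_up_process(t);

/* ... much later, at teardown ... */
kthread_stop(t);	/* safe even if worker_fn already returned */
put_task_struct(t);	/* drop the creator's reference */
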
 
index 01ced4a889e04b02648d017985a2b53bc293be19..9e433d58d2651cf867294a911d0f136e565730ae 100644 (file)
@@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
        [IFLA_INFO_SLAVE_DATA]  = { .type = NLA_NESTED },
 };
 
-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
-       [IFLA_VF_INFO]          = { .type = NLA_NESTED },
-};
-
 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
        [IFLA_VF_MAC]           = { .len = sizeof(struct ifla_vf_mac) },
        [IFLA_VF_VLAN]          = { .len = sizeof(struct ifla_vf_vlan) },
@@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
        return 0;
 }
 
-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 {
-       int rem, err = -EINVAL;
-       struct nlattr *vf;
        const struct net_device_ops *ops = dev->netdev_ops;
+       int err = -EINVAL;
 
-       nla_for_each_nested(vf, attr, rem) {
-               switch (nla_type(vf)) {
-               case IFLA_VF_MAC: {
-                       struct ifla_vf_mac *ivm;
-                       ivm = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_mac)
-                               err = ops->ndo_set_vf_mac(dev, ivm->vf,
-                                                         ivm->mac);
-                       break;
-               }
-               case IFLA_VF_VLAN: {
-                       struct ifla_vf_vlan *ivv;
-                       ivv = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_vlan)
-                               err = ops->ndo_set_vf_vlan(dev, ivv->vf,
-                                                          ivv->vlan,
-                                                          ivv->qos);
-                       break;
-               }
-               case IFLA_VF_TX_RATE: {
-                       struct ifla_vf_tx_rate *ivt;
-                       struct ifla_vf_info ivf;
-                       ivt = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_get_vf_config)
-                               err = ops->ndo_get_vf_config(dev, ivt->vf,
-                                                            &ivf);
-                       if (err)
-                               break;
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_rate)
-                               err = ops->ndo_set_vf_rate(dev, ivt->vf,
-                                                          ivf.min_tx_rate,
-                                                          ivt->rate);
-                       break;
-               }
-               case IFLA_VF_RATE: {
-                       struct ifla_vf_rate *ivt;
-                       ivt = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_rate)
-                               err = ops->ndo_set_vf_rate(dev, ivt->vf,
-                                                          ivt->min_tx_rate,
-                                                          ivt->max_tx_rate);
-                       break;
-               }
-               case IFLA_VF_SPOOFCHK: {
-                       struct ifla_vf_spoofchk *ivs;
-                       ivs = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_spoofchk)
-                               err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
-                                                              ivs->setting);
-                       break;
-               }
-               case IFLA_VF_LINK_STATE: {
-                       struct ifla_vf_link_state *ivl;
-                       ivl = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_link_state)
-                               err = ops->ndo_set_vf_link_state(dev, ivl->vf,
-                                                                ivl->link_state);
-                       break;
-               }
-               case IFLA_VF_RSS_QUERY_EN: {
-                       struct ifla_vf_rss_query_en *ivrssq_en;
+       if (tb[IFLA_VF_MAC]) {
+               struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
 
-                       ivrssq_en = nla_data(vf);
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_rss_query_en)
-                               err = ops->ndo_set_vf_rss_query_en(dev,
-                                                           ivrssq_en->vf,
-                                                           ivrssq_en->setting);
-                       break;
-               }
-               default:
-                       err = -EINVAL;
-                       break;
-               }
-               if (err)
-                       break;
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_mac)
+                       err = ops->ndo_set_vf_mac(dev, ivm->vf,
+                                                 ivm->mac);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[IFLA_VF_VLAN]) {
+               struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_vlan)
+                       err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
+                                                  ivv->qos);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[IFLA_VF_TX_RATE]) {
+               struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+               struct ifla_vf_info ivf;
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_get_vf_config)
+                       err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
+               if (err < 0)
+                       return err;
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_rate)
+                       err = ops->ndo_set_vf_rate(dev, ivt->vf,
+                                                  ivf.min_tx_rate,
+                                                  ivt->rate);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[IFLA_VF_RATE]) {
+               struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_rate)
+                       err = ops->ndo_set_vf_rate(dev, ivt->vf,
+                                                  ivt->min_tx_rate,
+                                                  ivt->max_tx_rate);
+               if (err < 0)
+                       return err;
        }
+
+       if (tb[IFLA_VF_SPOOFCHK]) {
+               struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_spoofchk)
+                       err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+                                                      ivs->setting);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[IFLA_VF_LINK_STATE]) {
+               struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_link_state)
+                       err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+                                                        ivl->link_state);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[IFLA_VF_RSS_QUERY_EN]) {
+               struct ifla_vf_rss_query_en *ivrssq_en;
+
+               err = -EOPNOTSUPP;
+               ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+               if (ops->ndo_set_vf_rss_query_en)
+                       err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
+                                                          ivrssq_en->setting);
+               if (err < 0)
+                       return err;
+       }
+
        return err;
 }
 
@@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb,
        }
 
        if (tb[IFLA_VFINFO_LIST]) {
+               struct nlattr *vfinfo[IFLA_VF_MAX + 1];
                struct nlattr *attr;
                int rem;
+
                nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
-                       if (nla_type(attr) != IFLA_VF_INFO) {
+                       if (nla_type(attr) != IFLA_VF_INFO ||
+                           nla_len(attr) < NLA_HDRLEN) {
                                err = -EINVAL;
                                goto errout;
                        }
-                       err = do_setvfinfo(dev, attr);
+                       err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
+                                              ifla_vf_policy);
+                       if (err < 0)
+                               goto errout;
+                       err = do_setvfinfo(dev, vfinfo);
                        if (err < 0)
                                goto errout;
                        status |= DO_SETLINK_NOTIFY;
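
The rewrite lets nla_parse_nested() validate the nested attributes once against ifla_vf_policy (including the length checks the old hand-rolled switch never made) and then walks a type-indexed table, while do_setlink() additionally rejects IFLA_VF_INFO attributes shorter than a netlink header. A hedged sketch of that parsing idiom; MY_ATTR_*, my_policy, nested_attr and apply_foo() are placeholders:

struct nlattr *tb[MY_ATTR_MAX + 1];
int err;

err = nla_parse_nested(tb, MY_ATTR_MAX, nested_attr, my_policy);
if (err < 0)
	return err;		/* bad lengths/types rejected centrally */

if (tb[MY_ATTR_FOO]) {
	struct my_foo *cfg = nla_data(tb[MY_ATTR_FOO]);

	err = apply_foo(cfg);	/* act on one validated attribute */
	if (err < 0)
		return err;
}
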
index 392e29a0227dbf4aa4870d73c5ef333db528b675..b445d492c115382b9ecd25cbceb3e47053263387 100644 (file)
@@ -630,7 +630,7 @@ static int dsa_of_probe(struct device *dev)
                        continue;
 
                cd->sw_addr = be32_to_cpup(sw_addr);
-               if (cd->sw_addr > PHY_MAX_ADDR)
+               if (cd->sw_addr >= PHY_MAX_ADDR)
                        continue;
 
                if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
@@ -642,6 +642,8 @@ static int dsa_of_probe(struct device *dev)
                                continue;
 
                        port_index = be32_to_cpup(port_reg);
+                       if (port_index >= DSA_MAX_PORTS)
+                               break;
 
                        port_name = of_get_property(port, "label", NULL);
                        if (!port_name)
@@ -666,8 +668,6 @@ static int dsa_of_probe(struct device *dev)
                                        goto out_free_chip;
                        }
 
-                       if (port_index == DSA_MAX_PORTS)
-                               break;
                }
        }
 
index 7498716e8f541b5a55aef59a85e70b302ba7248c..e813196c91c76a66cc2eb272064d484734bd7497 100644 (file)
@@ -1740,6 +1740,8 @@ static int inet_netconf_msgsize_devconf(int type)
                size += nla_total_size(4);
        if (type == -1 || type == NETCONFA_PROXY_NEIGH)
                size += nla_total_size(4);
+       if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+               size += nla_total_size(4);
 
        return size;
 }
@@ -1780,6 +1782,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
            nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
                        IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
                goto nla_put_failure;
+       if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+           nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                       IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+               goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
        return 0;
@@ -1819,6 +1825,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
        [NETCONFA_FORWARDING]   = { .len = sizeof(int) },
        [NETCONFA_RP_FILTER]    = { .len = sizeof(int) },
        [NETCONFA_PROXY_NEIGH]  = { .len = sizeof(int) },
+       [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]  = { .len = sizeof(int) },
 };
 
 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -2048,6 +2055,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
                        inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
                                                    ifindex, cnf);
                }
+               if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
+                   new_value != old_value) {
+                       ifindex = devinet_conf_ifindex(net, cnf);
+                       inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                                                   ifindex, cnf);
+               }
        }
 
        return ret;
index 9bc26677058e0420739e4a7bcb7ffd1b593d6e67..c3b1f3a0f4cf63df6c7e862f96e44c8653d3a476 100644 (file)
@@ -152,8 +152,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                                       inet6_sk(sk)->tclass) < 0)
                                goto errout;
 
-               if (ipv6_only_sock(sk) &&
-                   nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1))
+               if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
+                   nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
                        goto errout;
        }
 #endif
index 4c2c3ba4ba6595c788940e3ec2c2dadfe7abe2f0..626d9e56a6bd2671611f3dde4f8090b76645301c 100644 (file)
@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 EXPORT_SYMBOL(ip_tunnel_encap);
 
 static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
-                           struct rtable *rt, __be16 df)
+                           struct rtable *rt, __be16 df,
+                           const struct iphdr *inner_iph)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 
        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
-                   (df & htons(IP_DF)) && mtu < pkt_size) {
+                   (inner_iph->frag_off & htons(IP_DF)) &&
+                   mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-       if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
+       if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
                ip_rt_put(rt);
                goto tx_error;
        }
index 95c9b6eece25dfa8da28f8866dd7e55ec86b72e7..92305a1a021a7936206f9078008532bc151ec757 100644 (file)
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        unsigned int verdict = NF_DROP;
        const struct arphdr *arp;
-       struct arpt_entry *e, *back;
+       struct arpt_entry *e, **jumpstack;
        const char *indev, *outdev;
        const void *table_base;
+       unsigned int cpu, stackidx = 0;
        const struct xt_table_info *private;
        struct xt_action_param acpar;
        unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        local_bh_disable();
        addend = xt_write_recseq_begin();
        private = table->private;
+       cpu     = smp_processor_id();
        /*
         * Ensure we load private-> members after we've fetched the base
         * pointer.
         */
        smp_read_barrier_depends();
        table_base = private->entries;
+       jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
 
        e = get_entry(table_base, private->hook_entry[hook]);
-       back = get_entry(table_base, private->underflow[hook]);
 
        acpar.in      = state->in;
        acpar.out     = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                                        verdict = (unsigned int)(-v) - 1;
                                        break;
                                }
-                               e = back;
-                               back = get_entry(table_base, back->comefrom);
+                               if (stackidx == 0) {
+                                       e = get_entry(table_base,
+                                                     private->underflow[hook]);
+                               } else {
+                                       e = jumpstack[--stackidx];
+                                       e = arpt_next_entry(e);
+                               }
                                continue;
                        }
                        if (table_base + v
                            != arpt_next_entry(e)) {
-                               /* Save old back ptr in next entry */
-                               struct arpt_entry *next = arpt_next_entry(e);
-                               next->comefrom = (void *)back - table_base;
 
-                               /* set back pointer to next entry */
-                               back = next;
+                               if (stackidx >= private->stacksize) {
+                                       verdict = NF_DROP;
+                                       break;
+                               }
+                               jumpstack[stackidx++] = e;
                        }
 
                        e = get_entry(table_base, v);
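
arpt_do_table() drops the old scheme of stashing a return address in the next rule's comefrom field, which wrote into the shared table and was unsafe once several CPUs traversed it concurrently, in favor of the per-CPU jump stack ip_tables already uses: a jump pushes the current entry, RETURN pops it or falls back to the hook's underflow, and stack overflow drops the packet. A standalone model of the stack discipline (struct rule and the sizes are illustrative):

#include <stdio.h>

#define STACK_SIZE 16

struct rule { int id; };

struct traversal {
	const struct rule *stack[STACK_SIZE];
	unsigned int sp;
};

static int push_call(struct traversal *t, const struct rule *from)
{
	if (t->sp >= STACK_SIZE)
		return -1;		/* overflow: caller drops the packet */
	t->stack[t->sp++] = from;
	return 0;
}

static const struct rule *pop_return(struct traversal *t)
{
	/* NULL tells the caller to resume at the hook's underflow rule. */
	return t->sp ? t->stack[--t->sp] : NULL;
}

int main(void)
{
	struct traversal t = { .sp = 0 };
	struct rule from = { .id = 42 };
	const struct rule *r;

	push_call(&t, &from);
	r = pop_return(&t);
	printf("%d\n", r ? r->id : -1);	/* prints 42 */
	return 0;
}
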
index f2e464eba5efdb7b2a8abe3c2cecedae473777e7..57990c929cd8156ebac52c648deb50fd3f74ab82 100644 (file)
@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
                                if (offset < 0)
                                        goto out;
 
-                               if (!ipv6_is_mld(skb, nexthdr, offset))
-                                       goto out;
+                               if (ipv6_is_mld(skb, nexthdr, offset))
+                                       deliver = true;
 
-                               deliver = true;
+                               goto out;
                        }
                        /* unknown RA - process it normally */
                }
index 1a1122a6bbf5208481f81f1e2643cbc41ed2e7e9..6090969937f8b6809f74c3d03f29a0703089eff1 100644 (file)
@@ -369,10 +369,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        struct inet6_dev *idev;
 
        dst_destroy_metrics_generic(dst);
-
-       if (rt->rt6i_pcpu)
-               free_percpu(rt->rt6i_pcpu);
-
+       free_percpu(rt->rt6i_pcpu);
        rt6_uncached_list_del(rt);
 
        idev = rt->rt6i_idev;
index cd60d397fe056f14ee4c310d75c4fba3f504cf71..8a8b2abc35ffdeacb5a61e1a801c92f91998bc3d 100644 (file)
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
        if (verdict == NF_ACCEPT) {
        next_hook:
-               verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+               verdict = nf_iterate(entry->state.hook_list,
                                     skb, &entry->state, &elem);
        }
 
index 8b117c90ecd765ac1b7242c58f36ef7531a49b6e..0c0e8ecf02abbb4214b18f00ef798d728234866b 100644 (file)
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
        }
 }
 
+enum {
+       NFNL_BATCH_FAILURE      = (1 << 0),
+       NFNL_BATCH_DONE         = (1 << 1),
+       NFNL_BATCH_REPLAY       = (1 << 2),
+};
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                u_int16_t subsys_id)
 {
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
-       bool success = true, done = false;
        static LIST_HEAD(err_list);
+       u32 status;
        int err;
 
        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return netlink_ack(skb, nlh, -EINVAL);
 replay:
+       status = 0;
+
        skb = netlink_skb_clone(oskb, GFP_KERNEL);
        if (!skb)
                return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
                if (type == NFNL_MSG_BATCH_BEGIN) {
                        /* Malformed: Batch begin twice */
                        nfnl_err_reset(&err_list);
-                       success = false;
+                       status |= NFNL_BATCH_FAILURE;
                        goto done;
                } else if (type == NFNL_MSG_BATCH_END) {
-                       done = true;
+                       status |= NFNL_BATCH_DONE;
                        goto done;
                } else if (type < NLMSG_MIN_TYPE) {
                        err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
                         * original skb.
                         */
                        if (err == -EAGAIN) {
-                               nfnl_err_reset(&err_list);
-                               ss->abort(oskb);
-                               nfnl_unlock(subsys_id);
-                               kfree_skb(skb);
-                               goto replay;
+                               status |= NFNL_BATCH_REPLAY;
+                               goto next;
                        }
                }
 ack:
@@ -402,7 +407,7 @@ ack:
                                 */
                                nfnl_err_reset(&err_list);
                                netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
-                               success = false;
+                               status |= NFNL_BATCH_FAILURE;
                                goto done;
                        }
                        /* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
                         * triggers.
                         */
                        if (err)
-                               success = false;
+                               status |= NFNL_BATCH_FAILURE;
                }
-
+next:
                msglen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (msglen > skb->len)
                        msglen = skb->len;
                skb_pull(skb, msglen);
        }
 done:
-       if (success && done)
+       if (status & NFNL_BATCH_REPLAY) {
+               ss->abort(oskb);
+               nfnl_err_reset(&err_list);
+               nfnl_unlock(subsys_id);
+               kfree_skb(skb);
+               goto replay;
+       } else if (status == NFNL_BATCH_DONE) {
                ss->commit(oskb);
-       else
+       } else {
                ss->abort(oskb);
+       }
 
        nfnl_err_deliver(&err_list, oskb);
        nfnl_unlock(subsys_id);
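
For illustration, a minimal user-space sketch of how the new status word resolves at the "done:" label above (the enum mirrors the NFNL_BATCH_* flags; resolve() is a hypothetical stand-in for the ss->commit()/ss->abort() decision):

	#include <stdio.h>

	enum {
		BATCH_FAILURE	= (1 << 0),
		BATCH_DONE	= (1 << 1),
		BATCH_REPLAY	= (1 << 2),
	};

	static void resolve(unsigned int status)
	{
		if (status & BATCH_REPLAY)		/* an -EAGAIN was seen */
			printf("abort + replay\n");
		else if (status == BATCH_DONE)		/* DONE set, FAILURE clear */
			printf("commit\n");
		else					/* errors, or no BATCH_END */
			printf("abort\n");
	}

	int main(void)
	{
		resolve(BATCH_DONE);			/* commit */
		resolve(BATCH_DONE | BATCH_FAILURE);	/* abort */
		resolve(BATCH_REPLAY | BATCH_FAILURE);	/* abort + replay */
		return 0;
	}

Note the exact comparison against BATCH_DONE: because any earlier failure leaves another bit set, a batch that hit an error is aborted even when NFNL_MSG_BATCH_END was received.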
index dea925388a5b7a86ba649f888fb10bb805f54edb..9a0ae7172f9271851f7a7c036e1c1980f45e5255 100644 (file)
@@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
 out:
        spin_unlock(&netlink_tap_lock);
 
-       if (found && nt->module)
+       if (found)
                module_put(nt->module);
 
        return found ? 0 : -ENODEV;
index 8b4a6cd2c3a78f0a4c7dbbf89fbe1bd6156aeb55..83498e1c75b83f3230dace76da031a6f46753ee9 100644 (file)
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister);
 
 void rds_trans_put(struct rds_transport *trans)
 {
-       if (trans && trans->t_owner)
+       if (trans)
                module_put(trans->t_owner);
 }
 
index 84f77a0540251cbe127785de670e5b0a41a3bdac..9f2add3cba26e54eadc15aeea05c3db167a75665 100644 (file)
@@ -171,8 +171,10 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
                 * released.
                 */
 
-               attr->trans = SWITCHDEV_TRANS_ABORT;
-               __switchdev_port_attr_set(dev, attr);
+               if (err != -EOPNOTSUPP) {
+                       attr->trans = SWITCHDEV_TRANS_ABORT;
+                       __switchdev_port_attr_set(dev, attr);
+               }
 
                return err;
        }
@@ -249,8 +251,10 @@ int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
                 * released.
                 */
 
-               obj->trans = SWITCHDEV_TRANS_ABORT;
-               __switchdev_port_obj_add(dev, obj);
+               if (err != -EOPNOTSUPP) {
+                       obj->trans = SWITCHDEV_TRANS_ABORT;
+                       __switchdev_port_obj_add(dev, obj);
+               }
 
                return err;
        }
index 46b6ed534ef224e5e1049b34eb9ebe34c27b383d..3a7567f690f35458f0fe58e0cc1254a4ec8033fa 100644 (file)
@@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
        res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
        if (res)
                goto exit;
+       security_sk_clone(sock->sk, new_sock->sk);
 
        new_sk = new_sock->sk;
        new_tsock = tipc_sk(new_sk);
index eff7de1fc82ee890e5a8f51695ee5a5751aa80f8..e70fcd12eeebe813bafd7e56f014bcf5b4e33647 100644 (file)
@@ -63,6 +63,8 @@ int main(void)
 
        DEVID(acpi_device_id);
        DEVID_FIELD(acpi_device_id, id);
+       DEVID_FIELD(acpi_device_id, cls);
+       DEVID_FIELD(acpi_device_id, cls_msk);
 
        DEVID(pnp_device_id);
        DEVID_FIELD(pnp_device_id, id);
index 84c86f3cd6cdc1b44b53b7a60b066dd6972b2b96..5f208820913259651246d71a3def44f49fa8da44 100644 (file)
@@ -523,12 +523,40 @@ static int do_serio_entry(const char *filename,
 }
 ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
 
-/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
+/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
+ *             "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
+ *
+ * NOTE: Each driver should use one of the following: _HID, _CIDs,
+ *       or _CLS. Also, bb, ss, and pp can be substituted with ??
+ *       as a don't-care byte.
+ */
 static int do_acpi_entry(const char *filename,
                        void *symval, char *alias)
 {
        DEF_FIELD_ADDR(symval, acpi_device_id, id);
-       sprintf(alias, "acpi*:%s:*", *id);
+       DEF_FIELD_ADDR(symval, acpi_device_id, cls);
+       DEF_FIELD_ADDR(symval, acpi_device_id, cls_msk);
+
+       if (id && strlen((const char *)*id))
+               sprintf(alias, "acpi*:%s:*", *id);
+       else if (cls) {
+               int i, byte_shift, cnt = 0;
+               unsigned int msk;
+
+               sprintf(&alias[cnt], "acpi*:");
+               cnt = 6;
+               for (i = 1; i <= 3; i++) {
+                       byte_shift = 8 * (3-i);
+                       msk = (*cls_msk >> byte_shift) & 0xFF;
+                       if (msk)
+                               sprintf(&alias[cnt], "%02x",
+                                       (*cls >> byte_shift) & 0xFF);
+                       else
+                               sprintf(&alias[cnt], "??");
+                       cnt += 2;
+               }
+               sprintf(&alias[cnt], ":*");
+       }
        return 1;
 }
 ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
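
As a worked example of the alias format generated above: a _CLS of 0x010601 (base-class 01, sub-class 06, prog-if 01) with a mask of 0xFFFF00 produces "acpi*:0106??:*". A standalone sketch of the same formatting loop (cls_alias() and the sample values are illustrative):

	#include <stdio.h>

	static void cls_alias(unsigned int cls, unsigned int cls_msk, char *alias)
	{
		int i, byte_shift, cnt = sprintf(alias, "acpi*:");

		for (i = 1; i <= 3; i++) {
			byte_shift = 8 * (3 - i);
			if ((cls_msk >> byte_shift) & 0xFF)
				cnt += sprintf(&alias[cnt], "%02x",
					       (cls >> byte_shift) & 0xFF);
			else
				cnt += sprintf(&alias[cnt], "??");
		}
		sprintf(&alias[cnt], ":*");
	}

	int main(void)
	{
		char alias[32];

		cls_alias(0x010601, 0xFFFF00, alias);
		printf("%s\n", alias);	/* prints: acpi*:0106??:* */
		return 0;
	}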
index 91ee1b2e0f9aa65fbdcb471d8f6695489453029f..12d3db3bd46b491542090cd2db01637754467e99 100644 (file)
@@ -886,7 +886,8 @@ static void check_section(const char *modname, struct elf_info *elf,
 #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
                ".kprobes.text"
 #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
-               ".fixup", ".entry.text", ".exception.text", ".text.*"
+               ".fixup", ".entry.text", ".exception.text", ".text.*", \
+               ".coldtext"
 
 #define INIT_SECTIONS      ".init.*"
 #define MEM_INIT_SECTIONS  ".meminit.*"
index 62310819964145eb800adea251122a2298c8d532..564079c5c49dce530f56fd0626827d81c0ec75d4 100644 (file)
@@ -3283,7 +3283,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
        int rc = 0;
 
        if (default_noexec &&
-           (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+           (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+                                  (!shared && (prot & PROT_WRITE)))) {
                /*
                 * We are making executable an anonymous mapping or a
                 * private file mapping that will also be writable.
index afe6a269ec177897d3a8851ae2e3cddf19688bce..57644b1dc42e1a38b5f1fd0d24ab75164b3bdbf1 100644 (file)
@@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
                if (offset == (u32)-1)
                        return 0;
 
+               /* don't waste ebitmap space if the netlabel bitmap is empty */
+               if (bitmap == 0) {
+                       offset += EBITMAP_UNIT_SIZE;
+                       continue;
+               }
+
                if (e_iter == NULL ||
                    offset >= e_iter->startbit + EBITMAP_SIZE) {
                        e_prev = e_iter;
index f0e72674c52d2c9b88b46cb281db39fe8fb68d8d..9098083869c85a62bc4df5bcf8df96f5ffcc7e00 100644 (file)
 
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+#include <linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)res, (const void *)p, size);
+               barrier();
+       }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)p, (const void *)res, size);
+               barrier();
+       }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (defined above), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE in different C statements.
+ *
+ * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+       ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
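
A short usage sketch for the two new macros, assuming this header is included; the shared variables and the producer/consumer split are hypothetical, and cross-CPU ordering would still need explicit memory barriers:

	static int payload;
	static int ready;

	static void producer(void)
	{
		payload = 42;
		WRITE_ONCE(ready, 1);	/* single, untorn store */
	}

	static void consumer(void)
	{
		while (!READ_ONCE(ready))	/* forces a fresh load each pass */
			;
		/* safe vs. e.g. an irq handler on the same CPU; SMP use
		 * would pair these accesses with barriers */
	}

Without READ_ONCE() the compiler may hoist the load of "ready" out of the loop and spin forever on a stale value.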
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644 (file)
index d07e586..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _TOOLS_LINUX_EXPORT_H_
-#define _TOOLS_LINUX_EXPORT_H_
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644 (file)
index 0000000..1125822
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids callbacks, which would drop performance dramatically.
+  It's not the cleanest way, but it's how you get both performance and
+  genericity in C (as opposed to C++)...
+
+  See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef __TOOLS_LINUX_PERF_RBTREE_H
+#define __TOOLS_LINUX_PERF_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+       unsigned long  __rb_parent_color;
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+    /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+       struct rb_node *rb_node;
+};
+
+
+#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT        (struct rb_root) { NULL, }
+#define        rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node)  \
+       ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node)  \
+       ((node)->__rb_parent_color = (unsigned long)(node))
+
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                           struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+                               struct rb_node **rb_link)
+{
+       node->__rb_parent_color = (unsigned long)parent;
+       node->rb_left = node->rb_right = NULL;
+
+       *rb_link = node;
+}
+
+#define rb_entry_safe(ptr, type, member) \
+       ({ typeof(ptr) ____ptr = (ptr); \
+          ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+       })
+
+
+/*
+ * Handy for checking that we are not deleting an entry that is
+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
+ * probably should be moved to lib/rbtree.c...
+ */
+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
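
Since the header leaves the insert and search cores to the user, the usual pattern is sketched below, following the Documentation/rbtree.txt samples the comment points to (struct mytype and the int key are illustrative):

	struct mytype {
		struct rb_node node;
		int key;
	};

	struct mytype *my_search(struct rb_root *root, int key)
	{
		struct rb_node *n = root->rb_node;

		while (n) {
			struct mytype *data = rb_entry(n, struct mytype, node);

			if (key < data->key)
				n = n->rb_left;
			else if (key > data->key)
				n = n->rb_right;
			else
				return data;
		}
		return NULL;
	}

	int my_insert(struct rb_root *root, struct mytype *data)
	{
		struct rb_node **new = &root->rb_node, *parent = NULL;

		/* walk down to the leaf position where the key belongs */
		while (*new) {
			struct mytype *this = rb_entry(*new, struct mytype, node);

			parent = *new;
			if (data->key < this->key)
				new = &(*new)->rb_left;
			else if (data->key > this->key)
				new = &(*new)->rb_right;
			else
				return -1;	/* key already present */
		}

		/* link the new node in red, then rebalance */
		rb_link_node(&data->node, parent, new);
		rb_insert_color(&data->node, root);
		return 0;
	}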
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644 (file)
index 0000000..43be941
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  tools/linux/include/linux/rbtree_augmented.h
+
+  Copied from:
+  linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
+#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+       void (*propagate)(struct rb_node *node, struct rb_node *stop);
+       void (*copy)(struct rb_node *old, struct rb_node *new);
+       void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fixup the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_augment_inserted() instead of the usual rb_insert_color() call.
+ * If rb_augment_inserted() rebalances the rbtree, it will call back into
+ * a user-provided function to update the augmented information on the
+ * affected subtrees.
+ */
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+                   const struct rb_augment_callbacks *augment)
+{
+       __rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,      \
+                            rbtype, rbaugmented, rbcompute)            \
+static inline void                                                     \
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)         \
+{                                                                      \
+       while (rb != stop) {                                            \
+               rbstruct *node = rb_entry(rb, rbstruct, rbfield);       \
+               rbtype augmented = rbcompute(node);                     \
+               if (node->rbaugmented == augmented)                     \
+                       break;                                          \
+               node->rbaugmented = augmented;                          \
+               rb = rb_parent(&node->rbfield);                         \
+       }                                                               \
+}                                                                      \
+static inline void                                                     \
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)                \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+}                                                                      \
+static void                                                            \
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)      \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+       old->rbaugmented = rbcompute(old);                              \
+}                                                                      \
+rbstatic const struct rb_augment_callbacks rbname = {                  \
+       rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
+};
+
+
+#define        RB_RED          0
+#define        RB_BLACK        1
+
+#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc)     ((pc) & 1)
+#define __rb_is_black(pc)  __rb_color(pc)
+#define __rb_is_red(pc)    (!__rb_color(pc))
+#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+       rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+                                      struct rb_node *p, int color)
+{
+       rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+                 struct rb_node *parent, struct rb_root *root)
+{
+       if (parent) {
+               if (parent->rb_left == old)
+                       parent->rb_left = new;
+               else
+                       parent->rb_right = new;
+       } else
+               root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                    const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+       struct rb_node *parent, *rebalance;
+       unsigned long pc;
+
+       if (!tmp) {
+               /*
+                * Case 1: node to erase has no more than 1 child (easy!)
+                *
+                * Note that if there is one child it must be red due to 5)
+                * and node must be black due to 4). We adjust colors locally
+                * so as to bypass __rb_erase_color() later on.
+                */
+               pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, child, parent, root);
+               if (child) {
+                       child->__rb_parent_color = pc;
+                       rebalance = NULL;
+               } else
+                       rebalance = __rb_is_black(pc) ? parent : NULL;
+               tmp = parent;
+       } else if (!child) {
+               /* Still case 1, but this time the child is node->rb_left */
+               tmp->__rb_parent_color = pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, tmp, parent, root);
+               rebalance = NULL;
+               tmp = parent;
+       } else {
+               struct rb_node *successor = child, *child2;
+               tmp = child->rb_left;
+               if (!tmp) {
+                       /*
+                        * Case 2: node's successor is its right child
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (s)  ->  (x) (c)
+                        *        \
+                        *        (c)
+                        */
+                       parent = successor;
+                       child2 = successor->rb_right;
+                       augment->copy(node, successor);
+               } else {
+                       /*
+                        * Case 3: node's successor is leftmost under
+                        * node's right child subtree
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (y)  ->  (x) (y)
+                        *      /            /
+                        *    (p)          (p)
+                        *    /            /
+                        *  (s)          (c)
+                        *    \
+                        *    (c)
+                        */
+                       do {
+                               parent = successor;
+                               successor = tmp;
+                               tmp = tmp->rb_left;
+                       } while (tmp);
+                       parent->rb_left = child2 = successor->rb_right;
+                       successor->rb_right = child;
+                       rb_set_parent(child, successor);
+                       augment->copy(node, successor);
+                       augment->propagate(parent, successor);
+               }
+
+               successor->rb_left = tmp = node->rb_left;
+               rb_set_parent(tmp, successor);
+
+               pc = node->__rb_parent_color;
+               tmp = __rb_parent(pc);
+               __rb_change_child(node, successor, tmp, root);
+               if (child2) {
+                       successor->__rb_parent_color = pc;
+                       rb_set_parent_color(child2, parent, RB_BLACK);
+                       rebalance = NULL;
+               } else {
+                       unsigned long pc2 = successor->__rb_parent_color;
+                       successor->__rb_parent_color = pc;
+                       rebalance = __rb_is_black(pc2) ? parent : NULL;
+               }
+               tmp = successor;
+       }
+
+       augment->propagate(tmp, NULL);
+       return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                  const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+       if (rebalance)
+               __rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
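
A sketch of how RB_DECLARE_CALLBACKS() above is meant to be instantiated: each node caches the maximum endpoint found in its subtree, the interval-tree pattern from Documentation/rbtree.txt (struct itnode and its field names are illustrative):

	struct itnode {
		struct rb_node rb;
		unsigned long start, last;
		unsigned long subtree_last;	/* the augmented field */
	};

	static unsigned long compute_subtree_last(struct itnode *n)
	{
		unsigned long max = n->last, v;

		if (n->rb.rb_left) {
			v = rb_entry(n->rb.rb_left, struct itnode, rb)->subtree_last;
			if (v > max)
				max = v;
		}
		if (n->rb.rb_right) {
			v = rb_entry(n->rb.rb_right, struct itnode, rb)->subtree_last;
			if (v > max)
				max = v;
		}
		return max;
	}

	RB_DECLARE_CALLBACKS(static, augment_cb, struct itnode, rb,
			     unsigned long, subtree_last, compute_subtree_last)

Insertion then pairs rb_link_node() with rb_insert_augmented(&n->rb, root, &augment_cb), and removal uses rb_erase_augmented() with the same callbacks.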
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644 (file)
index 0000000..17c2b59
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * Red-black tree properties:  http://en.wikipedia.org/wiki/Rbtree
+ *
+ *  1) A node is either red or black
+ *  2) The root is black
+ *  3) All leaves (NULL) are black
+ *  4) Both children of every red node are black
+ *  5) Every simple path from root to leaves contains the same number
+ *     of black nodes.
+ *
+ *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ *  consecutive red nodes in a path and every red node is therefore followed by
+ *  a black. So if B is the number of black nodes on every simple path (as per
+ *  5), then the longest possible path due to 4 is 2B.
+ *
+ *  We shall indicate color with case, where black nodes are uppercase and red
+ *  nodes are lowercase. Nodes of unknown color are drawn as red within
+ *  parentheses with an accompanying text comment.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+       rb->__rb_parent_color |= RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+       return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+                       struct rb_root *root, int color)
+{
+       struct rb_node *parent = rb_parent(old);
+       new->__rb_parent_color = old->__rb_parent_color;
+       rb_set_parent_color(old, new, color);
+       __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+           void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+       while (true) {
+               /*
+                * Loop invariant: node is red
+                *
+                * If there is a black parent, we are done.
+                * Otherwise, take some corrective action as we don't
+                * want a red root or two consecutive red nodes.
+                */
+               if (!parent) {
+                       rb_set_parent_color(node, NULL, RB_BLACK);
+                       break;
+               } else if (rb_is_black(parent))
+                       break;
+
+               gparent = rb_red_parent(parent);
+
+               tmp = gparent->rb_right;
+               if (parent != tmp) {    /* parent == gparent->rb_left */
+                       if (tmp && rb_is_red(tmp)) {
+                               /*
+                                * Case 1 - color flips
+                                *
+                                *       G            g
+                                *      / \          / \
+                                *     p   u  -->   P   U
+                                *    /            /
+                                *   n            n
+                                *
+                                * However, since g's parent might be red, and
+                                * 4) does not allow this, we need to recurse
+                                * at g.
+                                */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_right;
+                       if (node == tmp) {
+                               /*
+                                * Case 2 - left rotate at parent
+                                *
+                                *      G             G
+                                *     / \           / \
+                                *    p   U  -->    n   U
+                                *     \           /
+                                *      n         p
+                                *
+                                * This still leaves us in violation of 4), the
+                                * continuation into Case 3 will fix that.
+                                */
+                               parent->rb_right = tmp = node->rb_left;
+                               node->rb_left = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_right;
+                       }
+
+                       /*
+                        * Case 3 - right rotate at gparent
+                        *
+                        *        G           P
+                        *       / \         / \
+                        *      p   U  -->  n   g
+                        *     /                 \
+                        *    n                   U
+                        */
+                       gparent->rb_left = tmp;  /* == parent->rb_right */
+                       parent->rb_right = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               } else {
+                       tmp = gparent->rb_left;
+                       if (tmp && rb_is_red(tmp)) {
+                               /* Case 1 - color flips */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_left;
+                       if (node == tmp) {
+                               /* Case 2 - right rotate at parent */
+                               parent->rb_left = tmp = node->rb_right;
+                               node->rb_right = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_left;
+                       }
+
+                       /* Case 3 - left rotate at gparent */
+                       gparent->rb_right = tmp;  /* == parent->rb_left */
+                       parent->rb_left = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               }
+       }
+}
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+       while (true) {
+               /*
+                * Loop invariants:
+                * - node is black (or NULL on first iteration)
+                * - node is not the root (parent is not NULL)
+                * - All leaf paths going through parent and node have a
+                *   black node count that is 1 lower than other leaf paths.
+                */
+               sibling = parent->rb_right;
+               if (node != sibling) {  /* node == parent->rb_left */
+                       if (rb_is_red(sibling)) {
+                               /*
+                                * Case 1 - left rotate at parent
+                                *
+                                *     P               S
+                                *    / \             / \
+                                *   N   s    -->    p   Sr
+                                *      / \         / \
+                                *     Sl  Sr      N   Sl
+                                */
+                               parent->rb_right = tmp1 = sibling->rb_left;
+                               sibling->rb_left = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_right;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_left;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /*
+                                        * Case 2 - sibling color flip
+                                        * (p could be either color here)
+                                        *
+                                        *    (p)           (p)
+                                        *    / \           / \
+                                        *   N   S    -->  N   s
+                                        *      / \           / \
+                                        *     Sl  Sr        Sl  Sr
+                                        *
+                                        * This leaves us violating 5) which
+                                        * can be fixed by flipping p to black
+                                        * if it was red, or by recursing at p.
+                                        * p is red when coming from Case 1.
+                                        */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /*
+                                * Case 3 - right rotate at sibling
+                                * (p could be either color here)
+                                *
+                                *   (p)           (p)
+                                *   / \           / \
+                                *  N   S    -->  N   Sl
+                                *     / \             \
+                                *    sl  Sr            s
+                                *                       \
+                                *                        Sr
+                                */
+                               sibling->rb_left = tmp1 = tmp2->rb_right;
+                               tmp2->rb_right = sibling;
+                               parent->rb_right = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /*
+                        * Case 4 - left rotate at parent + color flips
+                        * (p and sl could be either color here.
+                        *  After rotation, p becomes black, s acquires
+                        *  p's color, and sl keeps its color)
+                        *
+                        *      (p)             (s)
+                        *      / \             / \
+                        *     N   S     -->   P   Sr
+                        *        / \         / \
+                        *      (sl) sr      N  (sl)
+                        */
+                       parent->rb_right = tmp2 = sibling->rb_left;
+                       sibling->rb_left = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               } else {
+                       sibling = parent->rb_left;
+                       if (rb_is_red(sibling)) {
+                               /* Case 1 - right rotate at parent */
+                               parent->rb_left = tmp1 = sibling->rb_right;
+                               sibling->rb_right = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_left;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_right;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /* Case 2 - sibling color flip */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /* Case 3 - right rotate at sibling */
+                               sibling->rb_right = tmp1 = tmp2->rb_left;
+                               tmp2->rb_left = sibling;
+                               parent->rb_left = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /* Case 4 - left rotate at parent + color flips */
+                       parent->rb_left = tmp2 = sibling->rb_right;
+                       sibling->rb_right = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               }
+       }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       ____rb_erase_color(parent, root, augment_rotate);
+}
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+       dummy_propagate, dummy_copy, dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+       __rb_insert(node, root, dummy_rotate);
+}
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+       struct rb_node *rebalance;
+       rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+       if (rebalance)
+               ____rb_erase_color(rebalance, root, dummy_rotate);
+}
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       __rb_insert(node, root, augment_rotate);
+}
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_left)
+               n = n->rb_left;
+       return n;
+}
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_right)
+               n = n->rb_right;
+       return n;
+}
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a right-hand child, go down and then left as far
+        * as we can.
+        */
+       if (node->rb_right) {
+               node = node->rb_right;
+               while (node->rb_left)
+                       node = node->rb_left;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No right-hand children. Everything down and left is smaller than us,
+        * so any 'next' node must be in the general direction of our parent.
+        * Go up the tree; any time the ancestor is a right-hand child of its
+        * parent, keep going up. First time it's a left-hand child of its
+        * parent, said parent is our 'next' node.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_right)
+               node = parent;
+
+       return parent;
+}
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a left-hand child, go down and then right as far
+        * as we can.
+        */
+       if (node->rb_left) {
+               node = node->rb_left;
+               while (node->rb_right)
+                       node = node->rb_right;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No left-hand children. Go up till we find an ancestor which
+        * is a right-hand child of its parent.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_left)
+               node = parent;
+
+       return parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                    struct rb_root *root)
+{
+       struct rb_node *parent = rb_parent(victim);
+
+       /* Set the surrounding nodes to point to the replacement */
+       __rb_change_child(victim, new, parent, root);
+       if (victim->rb_left)
+               rb_set_parent(victim->rb_left, new);
+       if (victim->rb_right)
+               rb_set_parent(victim->rb_right, new);
+
+       /* Copy the pointers/colour from the victim to the replacement */
+       *new = *victim;
+}
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+       for (;;) {
+               if (node->rb_left)
+                       node = node->rb_left;
+               else if (node->rb_right)
+                       node = node->rb_right;
+               else
+                       return (struct rb_node *)node;
+       }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+       const struct rb_node *parent;
+       if (!node)
+               return NULL;
+       parent = rb_parent(node);
+
+       /* If we're sitting on node, we've already seen our children */
+       if (parent && node == parent->rb_left && parent->rb_right) {
+               /* If we are the parent's left node, go to the parent's right
+                * node then all the way down to the left */
+               return rb_left_deepest_node(parent->rb_right);
+       } else
+               /* Otherwise we are the parent's right node, and the parent
+                * should be next */
+               return (struct rb_node *)parent;
+}
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+       if (!root->rb_node)
+               return NULL;
+
+       return rb_left_deepest_node(root->rb_node);
+}
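
The postorder iterators exist mainly so a whole tree can be torn down without rebalancing after every deletion: children are always visited before their parent, so each node may be freed as soon as its successor has been fetched. A sketch (my_free() and struct mytype are illustrative):

	void my_destroy(struct rb_root *root)
	{
		struct rb_node *n = rb_first_postorder(root), *next;

		while (n) {
			next = rb_next_postorder(n);	/* fetch before freeing n */
			my_free(rb_entry(n, struct mytype, node));
			n = next;
		}
		*root = RB_ROOT;
	}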
index fe50a1b34aa0035dec38a07a752ae33df9ad0e5d..09dc0aabb5154cb0906b6299afed4a3d0fc009c8 100644 (file)
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
+tools/lib/rbtree.c
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@ tools/include/linux/kernel.h
 tools/include/linux/list.h
 tools/include/linux/log2.h
 tools/include/linux/poison.h
+tools/include/linux/rbtree.h
+tools/include/linux/rbtree_augmented.h
 tools/include/linux/types.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
 include/linux/perf_event.h
-include/linux/rbtree.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
 lib/hweight.c
-lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/hw_breakpoint.h
-include/linux/rbtree_augmented.h
 include/uapi/linux/perf_event.h
 include/uapi/linux/const.h
 include/uapi/linux/swab.h
index 586a59d46022a9fc8901807f5c02be4e2551db25..601d11440596dfd2b1d5244f56e972487d3fecff 100644 (file)
@@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644 (file)
index f06d89f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __TOOLS_LINUX_PERF_RBTREE_H
-#define __TOOLS_LINUX_PERF_RBTREE_H
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree.h"
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
- */
-static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
-       rb_erase(n, root);
-       RB_CLEAR_NODE(n);
-}
-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644 (file)
index 9d6fcdf..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree_augmented.h"
index 8e9b64520ec1096c4ffe508970fe41db36fe04e6..f56914c7929b80169efc6f3079cb1b1fae8cd269 100644 (file)
@@ -1,3 +1,6 @@
+ldflags-y += --wrap=ioremap_wt
+ldflags-y += --wrap=ioremap_wc
+ldflags-y += --wrap=devm_ioremap_nocache
 ldflags-y += --wrap=ioremap_cache
 ldflags-y += --wrap=ioremap_nocache
 ldflags-y += --wrap=iounmap
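
These flags use the GNU linker's symbol wrapping: with --wrap=sym, undefined references to sym are redirected to __wrap_sym, while __real_sym resolves to the original definition. That is how the test module below interposes on the ioremap family. A minimal user-space illustration of the same mechanism (link with -Wl,--wrap=malloc; the tracing wrapper is illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	void *__real_malloc(size_t size);	/* bound to the real malloc by ld */

	/* every malloc() call in wrapped objects lands here instead */
	void *__wrap_malloc(size_t size)
	{
		fprintf(stderr, "malloc(%zu)\n", size);
		return __real_malloc(size);
	}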
index c85a6f6ba5593e0cdf1834cc314c136a6d339fff..64bfaa50831ccd99f999ebb08fbb35eb0248820c 100644 (file)
@@ -65,6 +65,21 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
        return fallback_fn(offset, size);
 }
 
+void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
+               resource_size_t offset, unsigned long size)
+{
+       struct nfit_test_resource *nfit_res;
+
+       rcu_read_lock();
+       nfit_res = get_nfit_res(offset);
+       rcu_read_unlock();
+       if (nfit_res)
+               return (void __iomem *) nfit_res->buf + offset
+                       - nfit_res->res->start;
+       return devm_ioremap_nocache(dev, offset, size);
+}
+EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
+
 void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size)
 {
        return __nfit_test_ioremap(offset, size, ioremap_cache);
@@ -77,6 +92,18 @@ void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
 }
 EXPORT_SYMBOL(__wrap_ioremap_nocache);
 
+void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size)
+{
+       return __nfit_test_ioremap(offset, size, ioremap_wt);
+}
+EXPORT_SYMBOL(__wrap_ioremap_wt);
+
+void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
+{
+       return __nfit_test_ioremap(offset, size, ioremap_wc);
+}
+EXPORT_SYMBOL(__wrap_ioremap_wc);
+
 void __wrap_iounmap(volatile void __iomem *addr)
 {
        struct nfit_test_resource *nfit_res;
index 4b69b8368de01feb32b5521835efed0b8d372148..d0bdae40ccc9033aae0e9695943d4ce5367bde33 100644 (file)
@@ -128,6 +128,8 @@ struct nfit_test {
        int num_pm;
        void **dimm;
        dma_addr_t *dimm_dma;
+       void **flush;
+       dma_addr_t *flush_dma;
        void **label;
        dma_addr_t *label_dma;
        void **spa_set;
@@ -155,7 +157,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
        int i, rc;
 
        if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
-               return -ENXIO;
+               return -ENOTTY;
 
        /* lookup label space for the given dimm */
        for (i = 0; i < ARRAY_SIZE(handle); i++)
@@ -331,7 +333,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
                        + sizeof(struct acpi_nfit_system_address) * NUM_SPA
                        + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
                        + sizeof(struct acpi_nfit_control_region) * NUM_DCR
-                       + sizeof(struct acpi_nfit_data_region) * NUM_BDW;
+                       + sizeof(struct acpi_nfit_data_region) * NUM_BDW
+                       + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
        int i;
 
        t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -356,6 +359,10 @@ static int nfit_test0_alloc(struct nfit_test *t)
                if (!t->label[i])
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);
+
+               t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
+               if (!t->flush[i])
+                       return -ENOMEM;
        }
 
        for (i = 0; i < NUM_DCR; i++) {
@@ -408,6 +415,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        struct acpi_nfit_system_address *spa;
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_data_region *bdw;
+       struct acpi_nfit_flush_address *flush;
        unsigned int offset;
 
        nfit_test_init_header(nfit_buf, size);
@@ -831,6 +839,39 @@ static void nfit_test0_setup(struct nfit_test *t)
        bdw->capacity = DIMM_SIZE;
        bdw->start_address = 0;
 
+       offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
+       /* flush0 (dimm0) */
+       flush = nfit_buf + offset;
+       flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+       flush->header.length = sizeof(struct acpi_nfit_flush_address);
+       flush->device_handle = handle[0];
+       flush->hint_count = 1;
+       flush->hint_address[0] = t->flush_dma[0];
+
+       /* flush1 (dimm1) */
+       flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
+       flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+       flush->header.length = sizeof(struct acpi_nfit_flush_address);
+       flush->device_handle = handle[1];
+       flush->hint_count = 1;
+       flush->hint_address[0] = t->flush_dma[1];
+
+       /* flush2 (dimm2) */
+       flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
+       flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+       flush->header.length = sizeof(struct acpi_nfit_flush_address);
+       flush->device_handle = handle[2];
+       flush->hint_count = 1;
+       flush->hint_address[0] = t->flush_dma[2];
+
+       /* flush3 (dimm3) */
+       flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
+       flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+       flush->header.length = sizeof(struct acpi_nfit_flush_address);
+       flush->device_handle = handle[3];
+       flush->hint_count = 1;
+       flush->hint_address[0] = t->flush_dma[3];
+
        acpi_desc = &t->acpi_desc;
        set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
        set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
@@ -933,6 +974,10 @@ static int nfit_test_probe(struct platform_device *pdev)
                                GFP_KERNEL);
                nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
                                GFP_KERNEL);
+               nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
+                               GFP_KERNEL);
+               nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+                               GFP_KERNEL);
                nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
                                GFP_KERNEL);
                nfit_test->label_dma = devm_kcalloc(dev, num,
@@ -943,7 +988,8 @@ static int nfit_test_probe(struct platform_device *pdev)
                                sizeof(dma_addr_t), GFP_KERNEL);
                if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
                                && nfit_test->label_dma && nfit_test->dcr
-                               && nfit_test->dcr_dma)
+                               && nfit_test->dcr_dma && nfit_test->flush
+                               && nfit_test->flush_dma)
                        /* pass */;
                else
                        return -ENOMEM;