]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge tag 'wireless-drivers-for-davem-2017-12-08' of git://git.kernel.org/pub/scm...
author: David S. Miller <davem@davemloft.net>
Fri, 8 Dec 2017 19:48:49 +0000 (14:48 -0500)
committer: David S. Miller <davem@davemloft.net>
Fri, 8 Dec 2017 19:48:49 +0000 (14:48 -0500)
Kalle Valo says:

====================
wireless-drivers fixes for 4.15

Second set of fixes for 4.15. This time a lot of iwlwifi patches and
two brcmfmac patches. Most important here are the MIC and IVC fixes
for iwlwifi to unbreak 9000 series.

iwlwifi

* fix rate-scaling to not start at the lowest possible rate

* fix the TX queue hang detection for AP/GO modes

* fix the TX queue hang timeout in monitor interfaces

* fix packet injection

* remove a wrong error message when dumping PCI registers

* fix race condition with RF-kill

* tell mac80211 when the MIC has been stripped (9000 series)

* tell mac80211 when the IVC has been stripped (9000 series)

* add 2 new PCI IDs, one for 9000 and one for 22000

* fix a queue hang during a P2P Remain-on-Channel operation

brcmfmac

* fix a race which sometimes caused a crash during sdio unbind

* fix a kernel-doc related build error
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
521 files changed:
Documentation/devicetree/bindings/hwmon/jc42.txt
Documentation/sysctl/vm.txt
MAINTAINERS
Makefile
arch/alpha/include/uapi/asm/Kbuild
arch/arc/include/uapi/asm/Kbuild
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/kernel/entry-header.S
arch/arm64/Makefile
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/perf_event.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace-mod.S [deleted file]
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/module.lds
arch/arm64/kernel/perf_event.c
arch/arm64/mm/context.c
arch/arm64/mm/pgd.c
arch/blackfin/include/uapi/asm/Kbuild
arch/c6x/include/uapi/asm/Kbuild
arch/cris/include/uapi/asm/Kbuild
arch/frv/include/uapi/asm/Kbuild
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/include/uapi/asm/Kbuild
arch/ia64/include/uapi/asm/Kbuild
arch/m32r/include/uapi/asm/Kbuild
arch/m68k/include/uapi/asm/Kbuild
arch/metag/include/uapi/asm/Kbuild
arch/microblaze/include/uapi/asm/Kbuild
arch/mips/include/asm/pgtable.h
arch/mips/include/uapi/asm/Kbuild
arch/mips/kvm/mips.c
arch/mn10300/include/uapi/asm/Kbuild
arch/nios2/include/uapi/asm/Kbuild
arch/openrisc/include/uapi/asm/Kbuild
arch/parisc/include/uapi/asm/Kbuild
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/hash_native_64.c
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/atomic.h
arch/riscv/include/asm/barrier.h
arch/riscv/include/asm/bitops.h
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/mmu_context.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/spinlock.h
arch/riscv/include/asm/timex.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/vdso-syscalls.h [new file with mode: 0644]
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/Kbuild
arch/riscv/kernel/head.S
arch/riscv/kernel/riscv_ksyms.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/clock_getres.S [new file with mode: 0644]
arch/riscv/kernel/vdso/clock_gettime.S [new file with mode: 0644]
arch/riscv/kernel/vdso/flush_icache.S [new file with mode: 0644]
arch/riscv/kernel/vdso/getcpu.S [new file with mode: 0644]
arch/riscv/kernel/vdso/gettimeofday.S [new file with mode: 0644]
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/lib/delay.c
arch/riscv/mm/Makefile
arch/riscv/mm/cacheflush.c [new file with mode: 0644]
arch/riscv/mm/ioremap.c
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_mem.c
arch/s390/appldata/appldata_net_sum.c
arch/s390/appldata/appldata_os.c
arch/s390/boot/install.sh
arch/s390/crypto/aes_s390.c
arch/s390/crypto/arch_random.c
arch/s390/crypto/crc32-vx.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/paes_s390.c
arch/s390/crypto/prng.c
arch/s390/crypto/sha.h
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/crypto/sha_common.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/livepatch.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/perf_event.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/sysinfo.h
arch/s390/include/asm/topology.h
arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_para.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/virtio-ccw.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/lgr.c
arch/s390/kernel/module.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/sthyi.c
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/clock_getres.S
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_getres.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vtime.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/cmm.c
arch/s390/mm/gmap.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/s390/pci/pci.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_insn.c
arch/score/include/uapi/asm/Kbuild
arch/sh/include/uapi/asm/Kbuild
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/uapi/asm/Kbuild
arch/sparc/mm/gup.c
arch/tile/include/asm/pgtable.h
arch/tile/include/uapi/asm/Kbuild
arch/unicore32/include/uapi/asm/Kbuild
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pgtable.h
arch/x86/include/uapi/asm/Kbuild
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/xtensa/include/uapi/asm/Kbuild
block/bio.c
block/blk-sysfs.c
block/blk-wbt.c
block/genhd.c
drivers/acpi/device_sysfs.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/scan.c
drivers/block/null_blk.c
drivers/cpufreq/Kconfig
drivers/cpufreq/mediatek-cpufreq.c
drivers/dax/device.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
drivers/gpu/drm/amd/display/dc/inc/core_status.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/lvds-encoder.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_gemfs.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/omapdrm/displays/Kconfig
drivers/gpu/drm/omapdrm/dss/dpi.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/hwmon/jc42.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/i2c-boardinfo.c
drivers/infiniband/core/umem.c
drivers/md/bcache/alloc.c
drivers/md/bcache/btree.c
drivers/md/bcache/extents.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/misc/cxl/pci.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/block.c
drivers/mmc/core/bus.c
drivers/mmc/core/debugfs.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci.c
drivers/net/can/flexcan.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/phy/micrel.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/xen-netback/interface.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/s390/char/fs3270.c
drivers/s390/char/hmcdrv_mod.c
drivers/s390/char/monreader.c
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp_async.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_class.c
drivers/s390/char/tape_core.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmur.c
drivers/s390/char/zcore.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/isc.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/scm.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2a.h
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype50.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_msgtype6.h
drivers/s390/crypto/zcrypt_pcixcc.c
drivers/s390/crypto/zcrypt_pcixcc.h
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/fsm.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/smsgiucv.c
drivers/s390/net/smsgiucv_app.c
drivers/s390/scsi/zfcp_aux.c
drivers/s390/virtio/Makefile
drivers/s390/virtio/virtio_ccw.c
drivers/staging/lustre/lnet/lnet/lib-socket.c
drivers/vhost/net.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
fs/afs/internal.h
fs/afs/security.c
fs/afs/super.c
fs/autofs4/root.c
fs/dax.c
fs/exec.c
fs/fat/inode.c
fs/hugetlbfs/inode.c
fs/mbcache.c
fs/namei.c
fs/nfs/nfs4state.c
fs/quota/dquot.c
fs/reiserfs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/scrub/inode.c
fs/xfs/scrub/quota.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_item.c
fs/xfs/xfs_bmap_item.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_refcount_item.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/asm-generic/pgtable.h
include/drm/ttm/ttm_page_alloc.h
include/lib/libgcc.h [deleted file]
include/linux/fs.h
include/linux/hugetlb.h
include/linux/kvm_host.h
include/linux/libgcc.h [new file with mode: 0644]
include/linux/migrate.h
include/linux/mm.h
include/linux/perf_event.h
include/linux/rculist_nulls.h
include/linux/tcp.h
include/linux/usb/usbnet.h
include/net/red.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/trace/events/xdp.h
include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
include/uapi/linux/bcache.h
include/uapi/linux/bpf_perf_event.h
include/uapi/linux/kfd_ioctl.h
kernel/bpf/core.c
kernel/bpf/offload.c
kernel/events/core.c
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
lib/ashldi3.c
lib/ashrdi3.c
lib/cmpdi2.c
lib/lshrdi3.c
lib/muldi3.c
lib/nlattr.c
lib/ucmpdi2.c
mm/backing-dev.c
mm/frame_vector.c
mm/gup.c
mm/hmm.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
net/9p/trans_fd.c
net/core/dev.c
net/dccp/minisocks.c
net/dccp/proto.c
net/ipv4/inet_timewait_sock.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_rate.c
net/ipv4/tcp_recovery.c
net/ipv6/ip6_tunnel.c
net/ipv6/tcp_ipv6.c
net/kcm/kcmsock.c
net/rds/rdma.c
net/rxrpc/af_rxrpc.c
net/sched/sch_choke.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sctp/chunk.c
net/sctp/outqueue.c
net/sctp/socket.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/xprtsock.c
net/tipc/server.c
net/tipc/udp_media.c
net/vmw_vsock/hyperv_transport.c
samples/bpf/bpf_load.c
scripts/bloat-o-meter
scripts/faddr2line
security/apparmor/include/audit.h
tools/arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/ptrace.h [new file with mode: 0644]
tools/bpf/bpftool/Documentation/Makefile
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
tools/include/uapi/linux/bpf_perf_event.h
tools/perf/arch/s390/Makefile
tools/perf/arch/s390/util/dwarf-regs.c
tools/perf/check-headers.sh
tools/power/cpupower/bench/system.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_verifier_log.c
virt/kvm/arm/arm.c
virt/kvm/kvm_main.c

index 07a250498fbb4cccb3210a418c572907d8310c43..f569db58f64a100b6c72e1053ce443b77ae102c2 100644 (file)
@@ -34,6 +34,10 @@ Required properties:
 
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+                        This is not supported on all chips.
+
 Example:
 
 temp-sensor@1a {
index b920423f88cbcb19e04a7838359ef04dace8f431..5025ff9307e66c590a4a72795f9e2f75f0fbddc8 100644 (file)
@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
 retained.
 
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
 ==============================================================
 
 dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.
 
 The total available memory is not equal to total system memory.
 
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
 ==============================================================
 
 dirty_writeback_centisecs
index 77d819b458a99fb58c1f2f49b24b40c4681ef77a..745337ed01f6b67294a4d185bbf8d17b4717af4c 100644 (file)
@@ -554,13 +554,13 @@ S:        Orphan
 F:     Documentation/filesystems/affs.txt
 F:     fs/affs/
 
-AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
+AFS FILESYSTEM
 M:     David Howells <dhowells@redhat.com>
 L:     linux-afs@lists.infradead.org
 S:     Supported
 F:     fs/afs/
-F:     include/net/af_rxrpc.h
-F:     net/rxrpc/af_rxrpc.c
+F:     include/trace/events/afs.h
+F:     Documentation/filesystems/afs.txt
 W:     https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
@@ -6174,7 +6174,6 @@ M:        Jean Delvare <jdelvare@suse.com>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
 W:     http://hwmon.wiki.kernel.org/
-T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -11777,6 +11776,18 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl8xxxu/
 
+RXRPC SOCKETS (AF_RXRPC)
+M:     David Howells <dhowells@redhat.com>
+L:     linux-afs@lists.infradead.org
+S:     Supported
+F:     net/rxrpc/
+F:     include/keys/rxrpc-type.h
+F:     include/net/af_rxrpc.h
+F:     include/trace/events/rxrpc.h
+F:     include/uapi/linux/rxrpc.h
+F:     Documentation/networking/rxrpc.txt
+W:     https://www.infradead.org/~dhowells/kafs/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
 L:     linux-fbdev@vger.kernel.org
@@ -13648,10 +13659,8 @@ F:     drivers/net/wireless/ti/
 F:     include/linux/wl12xx.h
 
 TILE ARCHITECTURE
-M:     Chris Metcalf <cmetcalf@mellanox.com>
 W:     http://www.mellanox.com/repository/solutions/tile-scm/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
-S:     Supported
+S:     Orphan
 F:     arch/tile/
 F:     drivers/char/tile-srom.c
 F:     drivers/edac/tile_edac.c
index f761bf475ba5249292af4b086770497199e0b757..c988e46a53cd78cfa3c8759fada57c1ef1ffb3ec 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index b15bf6bc0e94f46f035e8781ffa921060341fe91..14a2e9af97e9992d87821e8f11276ecfef8e57cf 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index fa6d0ff4ff894be699616eefad77cd6a2347a3b7..170b5db64afeb7f74fb8279887a7cb75e0205c7b 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 2a029bceaf2f8593788dea27ec05f3664fc9733a..1a7a17b2a1bae97a21fca6a4920efd96540ac43d 100644 (file)
@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 }
 #define        __HAVE_ARCH_PTE_SPECIAL
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)         (pmd_isset((pmd), L_PMD_SECT_DIRTY))
 #define pud_page(pud)          pmd_page(__pmd(pud_val(pud)))
index 4d53de308ee089a7b745926ab8da16caa825806e..4d1cc1847edf076dfb3ea03db6712803a851d28b 100644 (file)
@@ -7,6 +7,7 @@ generated-y += unistd-oabi.h
 generated-y += unistd-eabi.h
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 7f4d80c2db6bf128451c8390b7f9048ca413034c..0f07579af472c8ec869c5d87fd8d1e105a24dcba 100644 (file)
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
-       tst     r1, #0xcf
+       tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
-       tst     r1, #0xcf
+       tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 
index b35788c909f1ce826df3717e14841836a9398070..b481b4a7c0111472baece0c0f5bcc9b985fb928a 100644 (file)
@@ -83,9 +83,6 @@ endif
 
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm64/kernel/module.lds
-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
-KBUILD_LDFLAGS_MODULE  += $(objtree)/arch/arm64/kernel/ftrace-mod.o
-endif
 endif
 
 # Default value
index 76d1cc85d5b115915aaa63138121e9ba286d8f4e..955130762a3c6acc09f3ee76574f7cefc5097b4c 100644 (file)
@@ -38,7 +38,7 @@
  *
  *     See Documentation/cachetlb.txt for more information. Please note that
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
- *     VIPT or ASID-tagged VIVT I-cache.
+ *     VIPT I-cache.
  *
  *     flush_cache_mm(mm)
  *
index 19bd97671bb8d4e78a2a3d46ef890fa06a8092fe..4f766178fa6ff3184963c1caaccaf646fd91a4a2 100644 (file)
@@ -32,7 +32,7 @@ struct mod_arch_specific {
        struct mod_plt_sec      init;
 
        /* for CONFIG_DYNAMIC_FTRACE */
-       void                    *ftrace_trampoline;
+       struct plt_entry        *ftrace_trampoline;
 };
 #endif
 
@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
 #endif
 
+struct plt_entry {
+       /*
+        * A program that conforms to the AArch64 Procedure Call Standard
+        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+        * IP1 (x17) may be inserted at any branch instruction that is
+        * exposed to a relocation that supports long branches. Since that
+        * is exactly what we are dealing with here, we are free to use x16
+        * as a scratch register in the PLT veneers.
+        */
+       __le32  mov0;   /* movn x16, #0x....                    */
+       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
+       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
+       __le32  br;     /* br   x16                             */
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+       /*
+        * MOVK/MOVN/MOVZ opcode:
+        * +--------+------------+--------+-----------+-------------+---------+
+        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+        * +--------+------------+--------+-----------+-------------+---------+
+        *
+        * Rd     := 0x10 (x16)
+        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+        * sf     := 1 (64-bit variant)
+        */
+       return (struct plt_entry){
+               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+               cpu_to_le32(0xd61f0200)
+       };
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+                                    const struct plt_entry *b)
+{
+       return a->mov0 == b->mov0 &&
+              a->mov1 == b->mov1 &&
+              a->mov2 == b->mov2;
+}
+
 #endif /* __ASM_MODULE_H */
index 8d5cbec17d803e37556b5f4a7b25e7b4f1391b35..f9ccc36d3dc3cb2e29ad2cd47f7cdd5eed4cef8a 100644 (file)
@@ -18,6 +18,7 @@
 #define __ASM_PERF_EVENT_H
 
 #include <asm/stack_pointer.h>
+#include <asm/ptrace.h>
 
 #define        ARMV8_PMU_MAX_COUNTERS  32
 #define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -79,6 +80,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)  perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 #endif
 
 #define perf_arch_fetch_caller_regs(regs, __ip) { \
index c9530b5b5ca836cbe23216d664e3ea9939d3b126..149d05fb9421520bd659b62627941ed36ce46bb3 100644 (file)
@@ -345,7 +345,6 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 8265dd79089522ec140ba8094da70f7f075d2884..067baace74a09b9474cc40cf9a4900c1486a8a2c 100644 (file)
@@ -61,6 +61,3 @@ extra-y                                       += $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
-
-# will be included by each individual module but not by the core kernel itself
-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
index d16978213c5b332b439205f7c582e1c9a2f65e4d..ea001241bdd470ab4a0a13ba4dad9bdb5a818bae 100644 (file)
@@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        &acpi_parking_protocol_ops,
 #endif
@@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
 
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops;
+       const struct cpu_operations *const *ops;
 
        ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
index 143b3e72c25e6c1b51c75582148c74ffe95a1a97..5084e699447a4d011742ad964448203c1d93337a 100644 (file)
@@ -1026,10 +1026,10 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 
        local_bh_disable();
 
-       if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-               current->thread.fpsimd_state = *state;
+       current->thread.fpsimd_state = *state;
+       if (system_supports_sve() && test_thread_flag(TIF_SVE))
                fpsimd_to_sve(current);
-       }
+
        task_fpsimd_load();
 
        if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
deleted file mode 100644 (file)
index 00c4025..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-       .section        ".text.ftrace_trampoline", "ax"
-       .align          3
-0:     .quad           0
-__ftrace_trampoline:
-       ldr             x16, 0b
-       br              x16
-ENDPROC(__ftrace_trampoline)
index c13b1fca0e5baff4f95c280b523dd3da01ec2533..50986e388d2b27e92f6984914af4ce756ea0ee46 100644 (file)
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-               unsigned long *trampoline;
+               struct plt_entry trampoline;
                struct module *mod;
 
                /*
@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
                 */
-               trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
-               if (trampoline[0] != addr) {
-                       if (trampoline[0] != 0) {
+               trampoline = get_plt_entry(addr);
+               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                      &trampoline)) {
+                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                              &(struct plt_entry){})) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
 
                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
-                       trampoline[0] = addr;
+                       *mod->arch.ftrace_trampoline = trampoline;
                        module_enable_ro(mod, true);
 
                        /* update trampoline before patching in the branch */
                        smp_wmb();
                }
-               addr = (unsigned long)&trampoline[1];
+               addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
 #else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
index d05dbe658409b251c9dd4c18348b77c1797e6973..ea640f92fe5adaf92526ee252fb8fbc73348d0b6 100644 (file)
 #include <linux/module.h>
 #include <linux/sort.h>
 
-struct plt_entry {
-       /*
-        * A program that conforms to the AArch64 Procedure Call Standard
-        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-        * IP1 (x17) may be inserted at any branch instruction that is
-        * exposed to a relocation that supports long branches. Since that
-        * is exactly what we are dealing with here, we are free to use x16
-        * as a scratch register in the PLT veneers.
-        */
-       __le32  mov0;   /* movn x16, #0x....                    */
-       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
-       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
-       __le32  br;     /* br   x16                             */
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
        return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
        int i = pltsec->plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;
 
-       /*
-        * MOVK/MOVN/MOVZ opcode:
-        * +--------+------------+--------+-----------+-------------+---------+
-        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-        * +--------+------------+--------+-----------+-------------+---------+
-        *
-        * Rd     := 0x10 (x16)
-        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-        * sf     := 1 (64-bit variant)
-        */
-       plt[i] = (struct plt_entry){
-               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-               cpu_to_le32(0xd61f0200)
-       };
+       plt[i] = get_plt_entry(val);
 
        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated.
         * (if one exists).
         */
-       if (i > 0 &&
-           plt[i].mov0 == plt[i - 1].mov0 &&
-           plt[i].mov1 == plt[i - 1].mov1 &&
-           plt[i].mov2 == plt[i - 1].mov2)
+       if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
                return (u64)&plt[i - 1];
 
        pltsec->plt_num_entries++;
@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
+       Elf_Shdr *tramp = NULL;
        int i;
 
        /*
@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                        mod->arch.core.plt = sechdrs + i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt = sechdrs + i;
+               else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+                        !strcmp(secstrings + sechdrs[i].sh_name,
+                                ".text.ftrace_trampoline"))
+                       tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }
@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;
 
+       if (tramp) {
+               tramp->sh_type = SHT_NOBITS;
+               tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+               tramp->sh_addralign = __alignof__(struct plt_entry);
+               tramp->sh_size = sizeof(struct plt_entry);
+       }
+
        return 0;
 }
index f7c9781a9d48b48396e61375da2e10f31c0b6296..22e36a21c1134576eb58a9209d75f2c6b2f09f85 100644 (file)
@@ -1,4 +1,5 @@
 SECTIONS {
        .plt (NOLOAD) : { BYTE(0) }
        .init.plt (NOLOAD) : { BYTE(0) }
+       .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
 }
index 9eaef51f83ff8d0ad7c15f54e84db43b14d5fbb3..3affca3dd96a3ee8c3c7bbb14085b08a7b657a79 100644 (file)
@@ -262,12 +262,6 @@ static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
 };
 
 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
index ab9f5f0fb2c7fc6ada0605e31d8f73b0024ae277..6f4017046323f874e832cb07f5863283877179ba 100644 (file)
@@ -96,12 +96,6 @@ static void flush_context(unsigned int cpu)
 
        set_reserved_asid_bits();
 
-       /*
-        * Ensure the generation bump is observed before we xchg the
-        * active_asids.
-        */
-       smp_wmb();
-
        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
@@ -117,7 +111,10 @@ static void flush_context(unsigned int cpu)
                per_cpu(reserved_asids, i) = asid;
        }
 
-       /* Queue a TLB invalidate and flush the I-cache if necessary. */
+       /*
+        * Queue a TLB invalidation for each CPU to perform on next
+        * context-switch
+        */
        cpumask_setall(&tlb_flush_pending);
 }
 
@@ -202,11 +199,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        asid = atomic64_read(&mm->context.id);
 
        /*
-        * The memory ordering here is subtle. We rely on the control
-        * dependency between the generation read and the update of
-        * active_asids to ensure that we are synchronised with a
-        * parallel rollover (i.e. this pairs with the smp_wmb() in
-        * flush_context).
+        * The memory ordering here is subtle.
+        * If our ASID matches the current generation, then we update
+        * our active_asids entry with a relaxed xchg. Racing with a
+        * concurrent rollover means that either:
+        *
+        * - We get a zero back from the xchg and end up waiting on the
+        *   lock. Taking the lock synchronises with the rollover and so
+        *   we are forced to see the updated generation.
+        *
+        * - We get a valid ASID back from the xchg, which means the
+        *   relaxed xchg in flush_context will treat us as reserved
+        *   because atomic RmWs are totally ordered for a given location.
         */
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
index 371c5f03a1708c172e731abb07637e369681fec1..051e71ec3335edc316817602a51d8676678c1a0f 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static struct kmem_cache *pgd_cache;
+static struct kmem_cache *pgd_cache __ro_after_init;
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index aa624b4ab6557c3e22d3660819f25688d605e3ed..2240b38c2915fa725cf2c5d1afc322edd1bb47c0 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 67ee896a76a7f2f0837436e054b8146dbe7dbac3..26644e15d8540fa43cf70e47acf9ce837dd18fd5 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 3687b54bb18ed1a0f36d512af627b8085f15987b..3470c6e9c7b9ba1ca3b5962d276b42a4c2d2e35f 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index b15bf6bc0e94f46f035e8781ffa921060341fe91..14a2e9af97e9992d87821e8f11276ecfef8e57cf 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index 187aed820e71feac3ffd03e021387bc892bda5e9..2f65f78792cbe5cf7219bb2228fb84e05a0b9204 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index cb5df3aad3a848e27fdccfd2c36e848b1217bc27..41a176dbb53e4f16524157bae122180d572dd4f5 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 13a97aa2285f7418d18f1396f03bf6281b580a0f..f5c6967a93bb204ff41bfdee837d3ec4bdaa94dd 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
index 1c44d3b3eba03bac62b9a69e6e560aac61a0a3ff..451bf6071c6e28036f0da81dedb35c8b184f3b5a 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += siginfo.h
index 3717b64a620df54a46495f07a3597188b9f727ec..c2e26a44c482da3a6d87d9b26173d64e7c9ca78f 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 6ac763d9a3e34e3cfb0caaf0072f78c26ec8b03f..f9eaf07d29f84ab1871d49b89c11a107fb5e1149 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 06609ca361150ab77529bf0999d8e258ad25d62c..2c6a6bffea3265d3f3ef1b4d04f6c64347e395e4 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 9e9e94415d08f13db779b0d63ea42692f0705c64..1a508a74d48d3f70595a2c5981b114f3e37d1061 100644 (file)
@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return !!(pmd_val(pmd) & _PAGE_WRITE);
index a0266feba9e6d996d5469ed18fd23df081a2ab38..7a4becd8963a219331632203849998a0a3f56e03 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += ipcbuf.h
index d535edc01434117a8809fc21fb152226e0b46521..75fdeaa8c62f21a5420c963968c0188bbb459f49 100644 (file)
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r = -EINTR;
-       sigset_t sigsaved;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        local_irq_enable();
 
 out:
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index c94ee54210bc489efd469493800596cd2b7061a3..81271d3af47cb1000ebfab2539efa92080a1eccc 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index ffca24da7647b80e0f45728dff8da4e9474dc65f..13a3d77b4d7bdc487b814ae2933940638b62c759 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 62286dbeb9043c6ff6eecbd5859c23fd21ce1baa..130c16ccba0a0abb135e31275eb34c564d6cd700 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 196d2a4efb312be6d830fe2de80538d1f7aaf8f4..286ef5a5904b02d5f346dd783dbea9fdb6b35e70 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += param.h
 generic-y += poll.h
index 9a677cd5997f9a891c2ecc8f6e3bbd08c5c41dbe..44697817ccc6ddc13406dc30388d06d7e8795335 100644 (file)
@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 #define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
index 96753f3aac6dd7e753ba9b2f9ad5e4ebba7f50aa..941c2a3f231b90686481b6e711dbded50f72eaf6 100644 (file)
@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
index 0d960ef78a9a95a682fe17d8e5050e3803a57323..1a6ed5919ffdb13878ab7f4a8c2f958c8a41f166 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
index 8ac0bd2bddb0c93b95dccfd61807807219913b5e..3280953a82cf63c4372735762a09804fd9c21677 100644 (file)
@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
         * NOTE, we rely on r0 being 0 from above.
         */
        mtspr   SPRN_IAMR,r0
+BEGIN_FTR_SECTION_NESTED(42)
        mtspr   SPRN_AMOR,r0
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
        /* save regs for local vars on new stack.
index bfdd783e39166143e205fd2424bae692fdbe5aa8..5acb5a176dbe5c8bffe6ddb7458b7d3ac2b7019f 100644 (file)
@@ -1569,16 +1569,22 @@ void arch_release_task_struct(struct task_struct *t)
  */
 int set_thread_tidr(struct task_struct *t)
 {
+       int rc;
+
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -EINVAL;
 
        if (t != current)
                return -EINVAL;
 
-       t->thread.tidr = assign_thread_tidr();
-       if (t->thread.tidr < 0)
-               return t->thread.tidr;
+       if (t->thread.tidr)
+               return 0;
+
+       rc = assign_thread_tidr();
+       if (rc < 0)
+               return rc;
 
+       t->thread.tidr = rc;
        mtspr(SPRN_TIDR, t->thread.tidr);
 
        return 0;
index 235319c2574e07f03c3473d66e160e6e900204e2..966097232d2147bbcd79df41354a5f973fb9b7c1 100644 (file)
@@ -1238,8 +1238,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
        unsigned long vpte, rpte, guest_rpte;
        int ret;
        struct revmap_entry *rev;
-       unsigned long apsize, psize, avpn, pteg, hash;
+       unsigned long apsize, avpn, pteg, hash;
        unsigned long new_idx, new_pteg, replace_vpte;
+       int pshift;
 
        hptep = (__be64 *)(old->virt + (idx << 4));
 
@@ -1298,8 +1299,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                goto out;
 
        rpte = be64_to_cpu(hptep[1]);
-       psize = hpte_base_page_size(vpte, rpte);
-       avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+       pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+       avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
        pteg = idx / HPTES_PER_GROUP;
        if (vpte & HPTE_V_SECONDARY)
                pteg = ~pteg;
@@ -1311,20 +1312,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                offset = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (offset / psize);
+               hash = vsid ^ (offset >> pshift);
        } else {
                unsigned long offset, vsid;
 
                /* We only have 40 - 23 bits of seg_off in avpn */
                offset = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (vsid << 25) ^ (offset / psize);
+               hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
        }
 
        new_pteg = hash & new_hash_mask;
@@ -1801,6 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
        ssize_t nb;
        long int err, ret;
        int mmu_ready;
+       int pshift;
 
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
@@ -1855,6 +1857,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                        err = -EINVAL;
                        if (!(v & HPTE_V_VALID))
                                goto out;
+                       pshift = kvmppc_hpte_base_page_shift(v, r);
+                       if (pshift <= 0)
+                               goto out;
                        lbuf += 2;
                        nb += HPTE_SIZE;
 
@@ -1869,14 +1874,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!mmu_ready && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_base_page_size(v, r);
-                               unsigned long senc = slb_pgsize_encoding(psize);
-                               unsigned long lpcr;
+                               unsigned long senc, lpcr;
 
+                               senc = slb_pgsize_encoding(1ul << pshift);
                                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
-                               lpcr = senc << (LPCR_VRMASD_SH - 4);
-                               kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+                               if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+                                       lpcr = senc << (LPCR_VRMASD_SH - 4);
+                                       kvmppc_update_lpcr(kvm, lpcr,
+                                                          LPCR_VRMASD);
+                               } else {
+                                       kvmppc_setup_partition_table(kvm);
+                               }
                                mmu_ready = 1;
                        }
                        ++i;
index 79ea3d9269dbf568904e504d78cc56850c77860d..2d46037ce93664199adee27806b8972d9130368d 100644 (file)
@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);
 
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
                int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
        return;
 }
 
-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
        unsigned long dw0, dw1;
 
index 6b6c53c42ac9455f2a8c4f157f402da772163336..1915e86cef6f8fc2e05852ddc7a0867eca1c560b 100644 (file)
@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r;
-       sigset_t sigsaved;
 
        if (vcpu->mmio_needed) {
                vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (run->immediate_exit)
                r = -EINTR;
        else
                r = kvmppc_vcpu_run(run, vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index 3848af167df9de4b183d7cb8472b5d20ce97d0a4..640cf566e98653ab43c06744b6cf9ef76622fa1c 100644 (file)
@@ -47,7 +47,8 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
+                                               int apsize, int ssize)
 {
        unsigned long va;
        unsigned int penc;
@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                             : "memory");
                break;
        }
-       trace_tlbie(0, 0, va, 0, 0, 0, 0);
+       return va;
+}
+
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+       unsigned long rb;
+
+       rb = ___tlbie(vpn, psize, apsize, ssize);
+       trace_tlbie(0, 0, rb, 0, 0, 0, 0);
 }
 
 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
-                       __tlbie(vpn, psize, apsize, ssize);
+                       ___tlbie(vpn, psize, apsize, ssize);
                }
        }
 
index 18158be62a2bfbb9f82250f889b61ea4d91f2da7..970460a0b492efe9fcf6bc126e06ab11074641ce 100644 (file)
@@ -40,6 +40,7 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
index 6cbbb6a68d76c2ba846fbcdeb65d91cf27e04f05..5ad4cb622bedf02f8b77bc27e7867c3cec6c7aca 100644 (file)
 #endif
 
 #if (__SIZEOF_INT__ == 4)
-#define INT            __ASM_STR(.word)
-#define SZINT          __ASM_STR(4)
-#define LGINT          __ASM_STR(2)
+#define RISCV_INT              __ASM_STR(.word)
+#define RISCV_SZINT            __ASM_STR(4)
+#define RISCV_LGINT            __ASM_STR(2)
 #else
 #error "Unexpected __SIZEOF_INT__"
 #endif
 
 #if (__SIZEOF_SHORT__ == 2)
-#define SHORT          __ASM_STR(.half)
-#define SZSHORT                __ASM_STR(2)
-#define LGSHORT                __ASM_STR(1)
+#define RISCV_SHORT            __ASM_STR(.half)
+#define RISCV_SZSHORT          __ASM_STR(2)
+#define RISCV_LGSHORT          __ASM_STR(1)
 #else
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
index e2e37c57cbeb24e19c6c48422818abb8f32af130..e65d1cd89e28bb5ae52291e8fa2d13937f28e1a9 100644 (file)
@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set.  These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)                               \
-static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)             \
-{                                                                                              \
-       __asm__ __volatile__ (                                                                  \
-               "amo" #asm_op "." #asm_type " zero, %1, %0"                                     \
-               : "+A" (v->counter)                                                             \
-               : "r" (I)                                                                       \
-               : "memory");                                                                    \
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)                             \
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)     \
+{                                                                                      \
+       __asm__ __volatile__ (                                                          \
+               "amo" #asm_op "." #asm_type " zero, %1, %0"                             \
+               : "+A" (v->counter)                                                     \
+               : "r" (I)                                                               \
+               : "memory");                                                            \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )   \
-        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   ) \
+        ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif
 
-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)
 
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^,  i)
  * There's two flavors of these: the arithmatic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)                   \
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)                         \
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)       \
 {                                                                                                      \
        register c_type ret;                                                                            \
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif
 
@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl,         )
 #undef ATOMIC_OPS
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )                \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )      \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif
 
-ATOMIC_OPS(and, and, &,  i,      , _relaxed)
-ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
-ATOMIC_OPS(and, and, &,  i, .rl  , _release)
-ATOMIC_OPS(and, and, &,  i, .aqrl,         )
+ATOMIC_OPS(and, and, i,      , _relaxed)
+ATOMIC_OPS(and, and, i, .aq  , _acquire)
+ATOMIC_OPS(and, and, i, .rl  , _release)
+ATOMIC_OPS(and, and, i, .aqrl,         )
 
-ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
-ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
-ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
-ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
+ATOMIC_OPS( or,  or, i,      , _relaxed)
+ATOMIC_OPS( or,  or, i, .aq  , _acquire)
+ATOMIC_OPS( or,  or, i, .rl  , _release)
+ATOMIC_OPS( or,  or, i, .aqrl,         )
 
-ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
-ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
-ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
-ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
+ATOMIC_OPS(xor, xor, i,      , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl  , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl,         )
 
 #undef ATOMIC_OPS
 
@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add,  <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
 
-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)                                \
+#define ATOMIC_OP(op, func_op, I, c_type, prefix)                              \
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)       \
 {                                                                              \
        atomic##prefix##_##func_op(I, v);                                       \
 }
 
-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)                          \
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)                                        \
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)       \
 {                                                                                      \
        return atomic##prefix##_fetch_##func_op(I, v);                                  \
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)                                \
+        ATOMIC_OP       (op, asm_op,       I, long, 64)                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif
 
@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
 
 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier.  We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense.  We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)                                             \
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n)         \
index 183534b7c39b7d663ea757b2d0ff346e58bae15c..773c4e039cd7288bcd25ed53ce831db84c766f26 100644 (file)
 #define smp_rmb()      RISCV_FENCE(r,r)
 #define smp_wmb()      RISCV_FENCE(w,w)
 
-/*
- * These fences exist to enforce ordering around the relaxed AMOs.  The
- * documentation defines that
- * "
- *     atomic_fetch_add();
- *   is equivalent to:
- *     smp_mb__before_atomic();
- *     atomic_fetch_add_relaxed();
- *     smp_mb__after_atomic();
- * "
- * So we emit full fences on both sides.
- */
-#define __smb_mb__before_atomic()      smp_mb()
-#define __smb_mb__after_atomic()       smp_mb()
-
-/*
- * These barriers prevent accesses performed outside a spinlock from being moved
- * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
- * enforce release consistency, we need full fences here.
- */
-#define smb_mb__before_spinlock()      smp_mb()
-#define smb_mb__after_spinlock()       smp_mb()
-
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
index 7c281ef1d58320d24caf33d4d1af2c57f4c75d9b..f30daf26f08f44bbb1028640c0d8e2539220ab0f 100644 (file)
@@ -67,7 +67,7 @@
                : "memory");
 
 #define __test_and_op_bit(op, mod, nr, addr)                   \
-       __test_and_op_bit_ord(op, mod, nr, addr, )
+       __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
 #define __op_bit(op, mod, nr, addr)                            \
        __op_bit_ord(op, mod, nr, addr, )
 
index c3e13764a943c6f66b80dfe54f92abb10da87962..bfc7f099ab1fea28981d2a3a1ffce8aa25e4ea4d 100644 (file)
@@ -27,8 +27,8 @@
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
-#define __BUG_ENTRY_ADDR       INT " 1b - 2b"
-#define __BUG_ENTRY_FILE       INT " %0 - 2b"
+#define __BUG_ENTRY_ADDR       RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE       RISCV_INT " %0 - 2b"
 #else
 #define __BUG_ENTRY_ADDR       RISCV_PTR " 1b"
 #define __BUG_ENTRY_FILE       RISCV_PTR " %0"
@@ -38,7 +38,7 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR "\n\t"         \
        __BUG_ENTRY_FILE "\n\t"         \
-       SHORT " %1"
+       RISCV_SHORT " %1"
 #else
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR
index 0595585013b07d899bccbe98598f3750bdd39bca..efd89a88d2d0e9b2bcd639a436a6143440a24d77 100644 (file)
 
 #undef flush_icache_range
 #undef flush_icache_user_range
+#undef flush_dcache_page
 
 static inline void local_flush_icache_all(void)
 {
        asm volatile ("fence.i" ::: "memory");
 }
 
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+       if (test_bit(PG_dcache_clean, &page->flags))
+               clear_bit(PG_dcache_clean, &page->flags);
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
 #ifndef CONFIG_SMP
 
-#define flush_icache_range(start, end) local_flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_range(start, end) sbi_remote_fence_i(0)
-#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+#define flush_icache_all() sbi_remote_fence_i(0)
+void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
index c1f32cfcc79bbb7f35786e3ab9b9ab5b8f683df8..a82ce599b639813c9ed3ad697f217cb09a6538e1 100644 (file)
@@ -19,6 +19,8 @@
 #ifndef _ASM_RISCV_IO_H
 #define _ASM_RISCV_IO_H
 
+#include <linux/types.h>
+
 #ifdef CONFIG_MMU
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -32,7 +34,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wc(addr, size) ioremap((addr), (size))
 #define ioremap_wt(addr, size) ioremap((addr), (size))
 
-extern void iounmap(void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);
 
 #endif /* CONFIG_MMU */
 
@@ -250,7 +252,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
                        const ctype *buf = buffer;                              \
                                                                                \
                        do {                                                    \
-                               __raw_writeq(*buf++, addr);                     \
+                               __raw_write ## len(*buf++, addr);               \
                        } while (--count);                                      \
                }                                                               \
                afence;                                                         \
@@ -266,9 +268,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar())
 __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
 __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
 __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
-#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
 
 __io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
 __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -280,9 +282,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
 __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 #ifdef CONFIG_64BIT
 __io_reads_ins(reads, u64, q, __io_br(), __io_ar())
index 66805cba9a27ad819a8cfc938e93f705fe432943..5df2dccdba122c4d5d02ffff7d82e8727b735ba6 100644 (file)
 
 typedef struct {
        void *vdso;
+#ifdef CONFIG_SMP
+       /* A local icache flush is needed before user execution can resume. */
+       cpumask_t icache_stale_mask;
+#endif
 } mm_context_t;
 
 #endif /* __ASSEMBLY__ */
index de1fc1631fc4367b439ce96ff21e2137ee8aa797..97424834dce2a7706bccebe505602d21fa78834b 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
 #ifndef _ASM_RISCV_MMU_CONTEXT_H
 #define _ASM_RISCV_MMU_CONTEXT_H
 
+#include <linux/mm_types.h>
 #include <asm-generic/mm_hooks.h>
 
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
        struct task_struct *task)
@@ -46,12 +49,54 @@ static inline void set_pgdir(pgd_t *pgd)
        csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
 }
 
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       cpumask_t *mask = &mm->context.icache_stale_mask;
+
+       if (cpumask_test_cpu(cpu, mask)) {
+               cpumask_clear_cpu(cpu, mask);
+               /*
+                * Ensure the remote hart's writes are visible to this hart.
+                * This pairs with a barrier in flush_icache_mm.
+                */
+               smp_mb();
+               local_flush_icache_all();
+       }
+#endif
+}
+
 static inline void switch_mm(struct mm_struct *prev,
        struct mm_struct *next, struct task_struct *task)
 {
        if (likely(prev != next)) {
+               /*
+                * Mark the current MM context as inactive, and the next as
+                * active.  This is at least used by the icache flushing
+                * routines in order to determine who should
+                */
+               unsigned int cpu = smp_processor_id();
+
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+
                set_pgdir(next->pgd);
                local_flush_tlb_all();
+
+               flush_icache_deferred(next);
        }
 }
 
index 3399257780b2cc219b7c6a29a48e54f67d342865..2cbd92ed1629c00df42b8ceaffd2250b6de7413a 100644 (file)
@@ -178,28 +178,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 #define pte_offset_map(dir, addr)      pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)                 ((void)(pte))
 
-/*
- * Certain architectures need to do special things when PTEs within
- * a page table are directly modified.  Thus, the following hook is
- * made available.
- */
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-       *ptep = pteval;
-}
-
-static inline void set_pte_at(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-       set_pte(ptep, pteval);
-}
-
-static inline void pte_clear(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep)
-{
-       set_pte_at(mm, addr, ptep, __pte(0));
-}
-
 static inline int pte_present(pte_t pte)
 {
        return (pte_val(pte) & _PAGE_PRESENT);
@@ -210,21 +188,22 @@ static inline int pte_none(pte_t pte)
        return (pte_val(pte) == 0);
 }
 
-/* static inline int pte_read(pte_t pte) */
-
 static inline int pte_write(pte_t pte)
 {
        return pte_val(pte) & _PAGE_WRITE;
 }
 
+static inline int pte_exec(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_EXEC;
+}
+
 static inline int pte_huge(pte_t pte)
 {
        return pte_present(pte)
                && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
 }
 
-/* static inline int pte_exec(pte_t pte) */
-
 static inline int pte_dirty(pte_t pte)
 {
        return pte_val(pte) & _PAGE_DIRTY;
@@ -311,6 +290,33 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
        return pte_val(pte_a) == pte_val(pte_b);
 }
 
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified.  Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void set_pte_at(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+       if (pte_present(pteval) && pte_exec(pteval))
+               flush_icache_pte(pteval);
+
+       set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep)
+{
+       set_pte_at(mm, addr, ptep, __pte(0));
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
index 04c71d938afdbf40f93d0d5d9d89229fa5f72c56..2fd27e8ef1fd686d8cf234143174a538223acc79 100644 (file)
@@ -24,7 +24,7 @@
 
 /* FIXME: Replace this with a ticket lock, like MIPS. */
 
-#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
@@ -58,15 +58,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        }
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_rmb();
-       do {
-               cpu_relax();
-       } while (arch_spin_is_locked(lock));
-       smp_acquire__after_ctrl_dep();
-}
-
 /***********************************************************/
 
 static inline void arch_read_lock(arch_rwlock_t *lock)
index 3df4932d8964faad964d0d3a3894ee0f0425c9d7..2f26989cb864bedaa1eb0bd53456ae33fcdcaabe 100644 (file)
@@ -18,7 +18,7 @@
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles(void)
+static inline cycles_t get_cycles_inline(void)
 {
        cycles_t n;
 
@@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void)
                : "=r" (n));
        return n;
 }
+#define get_cycles get_cycles_inline
 
 #ifdef CONFIG_64BIT
 static inline uint64_t get_cycles64(void)
index 5ee4ae370b5e54d49f3c062d1a19c30ca3ddfec2..715b0f10af580811dfca3ba067819e07fe1e7374 100644 (file)
 
 #ifdef CONFIG_MMU
 
-/* Flush entire local TLB */
+#include <linux/mm_types.h>
+
+/*
+ * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
+ * cache as well, so a 'fence.i' is not necessary.
+ */
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
new file mode 100644 (file)
index 0000000..a2ccf18
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
+#define _ASM_RISCV_VDSO_SYSCALLS_H
+
+#ifdef CONFIG_SMP
+
+/* These syscalls are only used by the vDSO and are not in the uapi. */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
+
+#endif /* _ASM_RISCV_VDSO_H */
index 602f61257553727ff87a9b077702a6407e240c30..541544d64c33b2f95a0d2f3a1f7f3e80498ea006 100644 (file)
@@ -38,4 +38,8 @@ struct vdso_data {
        (void __user *)((unsigned long)(base) + __vdso_##name);                 \
 })
 
+#ifdef CONFIG_SMP
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif
+
 #endif /* _ASM_RISCV_VDSO_H */
index 5ded96b063526e0073e3d79a41ba8b62e21e040c..7e91f485047576b559b3a8549ec7b5fc80827ac3 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += setup.h
 generic-y += unistd.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 76af908f87c18b76502c71d31ed12925790ce816..78f670d701339055c11c8cbeef409f9edf08ba3a 100644 (file)
@@ -152,6 +152,3 @@ END(_start)
 __PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
-ENTRY(empty_zero_page)
-       .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
-END(empty_zero_page)
index 23cc81ec9e9444be312a2714b8aed2a06c5eee68..5517342487489b6ee35c4a95bbfa3d5c4a31f2aa 100644 (file)
@@ -12,4 +12,7 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
index de7db114c31531ba244cb45c765b3fb2e1c559eb..8fbb6749910d42473d814b37eb255f812d4d206b 100644 (file)
@@ -58,7 +58,12 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 #endif /* CONFIG_CMDLINE_BOOL */
 
 unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
index b4a71ec5906f64089e7ec613daaade6bd3fef95a..6d3962435720d1ee7fe84bf74d50ffbe16d49219 100644 (file)
@@ -38,6 +38,13 @@ enum ipi_message_type {
        IPI_MAX
 };
 
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
 irqreturn_t handle_ipi(void)
 {
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -108,3 +115,51 @@ void smp_send_reschedule(int cpu)
 {
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+       unsigned int cpu;
+       cpumask_t others, *mask;
+
+       preempt_disable();
+
+       /* Mark every hart's icache as needing a flush for this MM. */
+       mask = &mm->context.icache_stale_mask;
+       cpumask_setall(mask);
+       /* Flush this hart's I$ now, and mark it as flushed. */
+       cpu = smp_processor_id();
+       cpumask_clear_cpu(cpu, mask);
+       local_flush_icache_all();
+
+       /*
+        * Flush the I$ of other harts concurrently executing, and mark them as
+        * flushed.
+        */
+       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+       local |= cpumask_empty(&others);
+       if (mm != current->active_mm || !local)
+               sbi_remote_fence_i(others.bits);
+       else {
+               /*
+                * It's assumed that at least one strongly ordered operation is
+                * performed on this hart between setting a hart's cpumask bit
+                * and scheduling this MM context on that hart.  Sending an SBI
+                * remote message will do this, but in the case where no
+                * messages are sent we still need to order this hart's writes
+                * with flush_icache_deferred().
+                */
+               smp_mb();
+       }
+
+       preempt_enable();
+}
index 4351be7d0533a6e3857169fd1de5d6cfdbd1bafc..a2ae936a093e4f91d4def96fff0dd46e6e0226c0 100644 (file)
@@ -14,8 +14,8 @@
  */
 
 #include <linux/syscalls.h>
-#include <asm/cmpxchg.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
@@ -47,3 +47,34 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
 }
 #endif /* !CONFIG_64BIT */
+
+#ifdef CONFIG_SMP
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+       uintptr_t, flags)
+{
+       struct mm_struct *mm = current->mm;
+       bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+
+       /* Check the reserved flags. */
+       if (unlikely(flags & !SYS_RISCV_FLUSH_ICACHE_ALL))
+               return -EINVAL;
+
+       flush_icache_mm(mm, local);
+
+       return 0;
+}
+#endif
index 4e30dc5fb593580e67eef62f8ea61c797c498cf6..a5bd6401f95e6988a376406f02b7e39cb7c1352e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
@@ -22,4 +23,5 @@
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
 };
index 523d0a8ac8db7f37750cf45496132d1156aceb38..324568d3392130fe5beba7bd03928a762848d377 100644 (file)
@@ -1,7 +1,12 @@
 # Copied from arch/tile/kernel/vdso/Makefile
 
 # Symbols present in the vdso
-vdso-syms = rt_sigreturn
+vdso-syms  = rt_sigreturn
+vdso-syms += gettimeofday
+vdso-syms += clock_gettime
+vdso-syms += clock_getres
+vdso-syms += getcpu
+vdso-syms += flush_icache
 
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
new file mode 100644 (file)
index 0000000..edf7e23
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__vdso_clock_getres)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_getres
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
new file mode 100644 (file)
index 0000000..aac6567
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__vdso_clock_gettime)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_gettime
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
new file mode 100644 (file)
index 0000000..b0fbad7
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
+
+       .text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+       .cfi_startproc
+#ifdef CONFIG_SMP
+       li a7, __NR_riscv_flush_icache
+       ecall
+#else
+       fence.i
+       li a0, 0
+#endif
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
new file mode 100644 (file)
index 0000000..cc7e989
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_getcpu
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
new file mode 100644 (file)
index 0000000..da85d33
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__vdso_gettimeofday)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_gettimeofday
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_gettimeofday)
index 8c9dce95c11d4f472c4b555c644c8caa63294345..cd1d47e0724ba0bd811101e0fa0eb59f7841bd5c 100644 (file)
@@ -70,8 +70,11 @@ VERSION
        LINUX_4.15 {
        global:
                __vdso_rt_sigreturn;
-               __vdso_cmpxchg32;
-               __vdso_cmpxchg64;
+               __vdso_gettimeofday;
+               __vdso_clock_gettime;
+               __vdso_clock_getres;
+               __vdso_getcpu;
+               __vdso_flush_icache;
        local: *;
        };
 }
index 1cc4ac3964b4c124864b72f3c807560d28df1461..dce8ae24c6d33b4ca3ebd0c19c3650f1ce890bde 100644 (file)
@@ -84,6 +84,7 @@ void __delay(unsigned long cycles)
        while ((unsigned long)(get_cycles() - t0) < cycles)
                cpu_relax();
 }
+EXPORT_SYMBOL(__delay);
 
 void udelay(unsigned long usecs)
 {
index 81f7d9ce6d881df0da157b0913275f49517495d4..eb22ab49b3e008ec4ab677778302d5dbbea358b1 100644 (file)
@@ -2,3 +2,4 @@ obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
+obj-y += cacheflush.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644 (file)
index 0000000..498c0a0
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+void flush_icache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               flush_icache_all();
+}
index e99194a4077ec2397aa978732e8554a2b303a6b4..70ef2724cdf61e5b2001f0ec6243b7f5e9c6bfaa 100644 (file)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(ioremap);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
index 6b3f41985f28e17763b0f71973d56186ed28c7dc..de54cfc6109d833017b29b6e115549aad2601d53 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # s390/Makefile
 #
@@ -6,10 +7,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1994 by Linus Torvalds
 #
 
index ef3fb1b9201f0331d333dc991af47c84597292a7..cb6e8066b1ad64b1a65ee441e3c1fb53e5599a8b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
index 598df5708501734307565d3d7ba7c2e8b3472f6e..e68136c3c23aa467a9e10b362888023abe9559d4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
index 66037d2622b4075c708068a414aa4b346f8740c1..8bc14b0d1def0a6847437f7ade9a0abc4217b770 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects accumulated network statistics (Packets received/transmitted,
index 45b3178200abc184ef790458d7d9c44d717379df..433a994b1a89ef30861e86559d1849d8f6d01a16 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
index aed3069699bd5abf94ffbeb71a7db756b210b12f..bed227f267ae52aab3a02ec5e8a0f01767896a10 100644 (file)
@@ -1,11 +1,8 @@
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
 #
 # arch/s390x/boot/install.sh
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1995 by Linus Torvalds
 #
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
index b48e20dd94e96a52f845782c2dd134b015794004..d60798737d8669ea447ae0905b98abf64e85aea6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *             Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #define KMSG_COMPONENT "aes_s390"
index 36aefc07d10cda9e9b28705d057952558d86e8d3..8720e9203ecfb07beac7c878aedfc925e2b0db1b 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 arch random implementation.
  *
  * Copyright IBM Corp. 2017
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #include <linux/kernel.h>
index 992e630c227b58febc6e489f25e439979a2df4ca..436865926c26e00b0652c5330f45dd7365962703 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Crypto-API module for CRC-32 algorithms implemented with the
  * z/Architecture Vector Extension Facility.
index 0d296662bbf0aba03dbaa79de182114e315b6184..5346b5a80bb6c1bfd805b421e7fcf05e86157a12 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * Copyright IBM Corp. 2003, 2011
  * Author(s): Thomas Spatzier
  *           Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include <linux/init.h>
index 564616d48d8bd885ce31c232843f02bbed1d2b3f..3b7f96c9eead8f994e979603216fd0d190fc9e42 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
index a4e903ed7e21c0ccc7ceb66944ac6b43662003ea..003932db8d12d04bfc479d17d9a6677068059293 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -7,11 +8,6 @@
  *   Copyright IBM Corp. 2017
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *             Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "paes_s390"
index 3e47c4a0f18b346f4ce9eb58ddb4928bcfe517a3..a97a1802cfb4d37d6e0ef5b9a9305aaf45febff7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
index 10f2007900790919f41ac6f3d3bd425c9decebb6..d6f8258b44df381943b8c570883ace6484363edb 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H
index 53c277999a2866b3aea6e9cc412ab6fc55aeb124..944aa6b237cd828fa17356b2eb961be87b57bbab 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * s390 Version:
  *   Copyright IBM Corp. 2005, 2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 2f4caa1ef123d1e6ce1bbf332ba045510e12ae07..b17eded532b121a76ddab6a8e9227a92fcbbfef6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
index c740f77285b2a6cf9d468b84a4c357cc8b6eba17..cf0718d121bcbb02f035f39474481947e447cf0e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
index cf8a2d92467f363a6a2195f77a6d07dff2b5d3d0..43bbe63e2992c1ef525597bf4475b5f4c26d134b 100644 (file)
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
- *    License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
index 792cda339af1ae3ad25ce8fc0457f4db961a4b8a..dd08db491b89e149fc7894a5f77578db8fe950d6 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * CPU-measurement facilities
  *
  *  Copyright IBM Corp. 2012
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *            Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
index 9a3cb3983c0140110f791c14ea0dbdb8cd84159d..1a61b1b997f2a0da08882411a8b701780fffba9a 100644 (file)
@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE                (is_compat_task() ? 0x000400000UL : \
-                                                   0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+                               (STACK_TOP / 3 * 2) : \
+                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
index 921391f2341eb8c4e886ee1bb4b67554f2d35d91..13de80cf741c09d94a4996a5ca18d883cbe0eba7 100644 (file)
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 #ifndef _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
index f3a9b5a445b64382c1020099b6dbcec5d9f30ede..e14f381757f67b6c0111c78c491c2c1078a7f177 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kernel virtual machines on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index 41393052ac57e1966ca735295be5f20f58ac3c55..74eeec9c0a809bffecbb26f3875ffc5be62e7e3b 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for paravirtual devices on s390
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 /*
@@ -20,8 +17,6 @@
  *
  * Copyright IBM Corp. 2007,2008
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
index 6de5c6cb0061a337d251fbabfe6d8a1c3769682d..672f95b12d4065b4fa023444dbd5575ca71d767e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * livepatch.h - s390-specific Kernel Live Patching Core
  *
@@ -7,13 +8,6 @@
  *           Jiri Slaby
  */
 
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
 #ifndef ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 
index f4a07f788f78b3160f9ae312e699805b47f67ebe..65154eaa3714a4e9182cb87654e7b896e7be3e2f 100644 (file)
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
-               current->mm->context.alloc_pgste;
+               (current->mm && current->mm->context.alloc_pgste);
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
        mm->context.use_cmma = 0;
index d6c9d1e0dc2d4bc0fe36a46109211f93682e5cfd..b9c0e361748bb46eb5dddb60f79e5de27eef51b1 100644 (file)
@@ -40,6 +40,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs) perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 
 /* Perf pt_regs extension for sample-data-entry indicators */
 struct perf_sf_sde_regs {
index d7fe9838084d3b2df31b26d16d6d3f4074d9a35d..57d7bc92e0b8a766d24520ea5234fca56971b646 100644 (file)
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
@@ -1264,6 +1264,12 @@ static inline pud_t pud_mkwrite(pud_t pud)
        return pud;
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
 static inline pud_t pud_mkclean(pud_t pud)
 {
        if (pud_large(pud)) {
index a3788dafc0e1f2272abd0ba9c455b775e81f627a..6f70d81c40f239fde907795707c2e8ebc2bb9d47 100644 (file)
@@ -74,9 +74,14 @@ enum {
  */
 struct pt_regs 
 {
-       unsigned long args[1];
-       psw_t psw;
-       unsigned long gprs[NUM_GPRS];
+       union {
+               user_pt_regs user_regs;
+               struct {
+                       unsigned long args[1];
+                       psw_t psw;
+                       unsigned long gprs[NUM_GPRS];
+               };
+       };
        unsigned long orig_gpr2;
        unsigned int int_code;
        unsigned int int_parm;
index 6bc941be6921773f566efd701a213ef44793a793..96f9a9151fde02fc6f76633d76d292f47512d364 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Access to user system call parameters and results
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef _ASM_SYSCALL_H
index a702cb9d4269240c462764878b50e02971f5d8ae..25057c118d563d46f9b45a637670c1d124a88509 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for store system information stsi
  *
  * Copyright IBM Corp. 2001, 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *              Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 1807229b292f005a4a0658ac221c3cb0481a39bf..cca406fdbe51fcf9985320c10988a02025b099bc 100644 (file)
@@ -53,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
 static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_TOPOLOGY */
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..cefe7c7
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 9ad172dcd912d5763b0bf954617c9e398ad31aa8..38535a57fef8327c3b08bf20e1f8621fd93c776a 100644 (file)
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 0dc86b3a7cb0d6340d9de5bee6032f6bfcc0c85c..b9ab584adf43d71232ce44414e51413423fbf673 100644 (file)
@@ -4,9 +4,5 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97ffdc6fa24f246c41bfe993790410e4e032..84606b8cc49e47c794fb3b16ef5681de098bfc6f 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
index 0d23c8ff290085b43745fdc23cd18a2911624aca..543dd70e12c81d59a9987c437f1895a216c2525b 100644 (file)
 #define GPR_SIZE       8
 #define CR_SIZE                8
 
-#define STACK_FRAME_OVERHEAD    160      /* size of minimum stack frame */
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
 
 #endif /* __s390x__ */
 
 #define ACR_SIZE       4
 
 
-#define PTRACE_OLDSETOPTIONS         21
+#define PTRACE_OLDSETOPTIONS        21
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
 #include <linux/types.h>
 
-typedef union
-{
-       float   f;
-       double  d;
-        __u64   ui;
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
        struct
        {
                __u32 hi;
@@ -197,23 +196,21 @@ typedef union
        } fp;
 } freg_t;
 
-typedef struct
-{
-       __u32   fpc;
+typedef struct {
+       __u32   fpc;
        __u32   pad;
-       freg_t  fprs[NUM_FPRS];              
+       freg_t  fprs[NUM_FPRS];
 } s390_fp_regs;
 
-#define FPC_EXCEPTION_MASK      0xF8000000
-#define FPC_FLAGS_MASK          0x00F80000
-#define FPC_DXC_MASK            0x0000FF00
-#define FPC_RM_MASK             0x00000003
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
 
 /* this typedef defines how a Program Status Word looks like */
-typedef struct 
-{
-        unsigned long mask;
-        unsigned long addr;
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
 } __attribute__ ((aligned(8))) psw_t;
 
 #ifndef __s390x__
@@ -282,33 +279,40 @@ typedef struct
 /*
  * The s390_regs structure is used to define the elf_gregset_t.
  */
-typedef struct
-{
+typedef struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
        unsigned long orig_gpr2;
 } s390_regs;
 
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
 /*
  * Now for the user space program event recording (trace) definitions.
  * The following structures are used only for the ptrace interface, don't
  * touch or even look at it if you don't want to modify the user-space
  * ptrace interface. In particular stay away from it for in-kernel PER.
  */
-typedef struct
-{
+typedef struct {
        unsigned long cr[NUM_CR_WORDS];
 } per_cr_words;
 
 #define PER_EM_MASK 0xE8000000UL
 
-typedef        struct
-{
+typedef struct {
 #ifdef __s390x__
-       unsigned                       : 32;
+       unsigned                       : 32;
 #endif /* __s390x__ */
-       unsigned em_branching          : 1;
+       unsigned em_branching          : 1;
        unsigned em_instruction_fetch  : 1;
        /*
         * Switching on storage alteration automatically fixes
@@ -317,44 +321,41 @@ typedef   struct
        unsigned em_storage_alteration : 1;
        unsigned em_gpr_alt_unused     : 1;
        unsigned em_store_real_address : 1;
-       unsigned                       : 3;
+       unsigned                       : 3;
        unsigned branch_addr_ctl       : 1;
-       unsigned                       : 1;
+       unsigned                       : 1;
        unsigned storage_alt_space_ctl : 1;
-       unsigned                       : 21;
+       unsigned                       : 21;
        unsigned long starting_addr;
        unsigned long ending_addr;
 } per_cr_bits;
 
-typedef struct
-{
+typedef struct {
        unsigned short perc_atmid;
        unsigned long address;
        unsigned char access_id;
 } per_lowcore_words;
 
-typedef struct
-{
-       unsigned perc_branching          : 1;
+typedef struct {
+       unsigned perc_branching          : 1;
        unsigned perc_instruction_fetch  : 1;
        unsigned perc_storage_alteration : 1;
-       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_gpr_alt_unused     : 1;
        unsigned perc_store_real_address : 1;
-       unsigned                         : 3;
-       unsigned atmid_psw_bit_31        : 1;
-       unsigned atmid_validity_bit      : 1;
-       unsigned atmid_psw_bit_32        : 1;
-       unsigned atmid_psw_bit_5         : 1;
-       unsigned atmid_psw_bit_16        : 1;
-       unsigned atmid_psw_bit_17        : 1;
-       unsigned si                      : 2;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
        unsigned long address;
-       unsigned                         : 4;
-       unsigned access_id               : 4;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
 } per_lowcore_bits;
 
-typedef struct
-{
+typedef struct {
        union {
                per_cr_words   words;
                per_cr_bits    bits;
@@ -364,9 +365,9 @@ typedef struct
         * the kernel always sets them to zero. To enable single
         * stepping use ptrace(PTRACE_SINGLESTEP) instead.
         */
-       unsigned  single_step       : 1;
+       unsigned  single_step       : 1;
        unsigned  instruction_fetch : 1;
-       unsigned                    : 30;
+       unsigned                    : 30;
        /*
         * These addresses are copied into cr10 & cr11 if single
         * stepping is switched off
@@ -376,11 +377,10 @@ typedef struct
        union {
                per_lowcore_words words;
                per_lowcore_bits  bits;
-       } lowcore; 
+       } lowcore;
 } per_struct;
 
-typedef struct
-{
+typedef struct {
        unsigned int  len;
        unsigned long kernel_addr;
        unsigned long process_addr;
@@ -390,12 +390,12 @@ typedef struct
  * S/390 specific non posix ptrace requests. I chose unusual values so
  * they are unlikely to clash with future ptrace definitions.
  */
-#define PTRACE_PEEKUSR_AREA           0x5000
-#define PTRACE_POKEUSR_AREA           0x5001
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
 #define PTRACE_PEEKTEXT_AREA         0x5002
 #define PTRACE_PEEKDATA_AREA         0x5003
 #define PTRACE_POKETEXT_AREA         0x5004
-#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_POKEDATA_AREA         0x5005
 #define PTRACE_GET_LAST_BREAK        0x5006
 #define PTRACE_PEEK_SYSTEM_CALL       0x5007
 #define PTRACE_POKE_SYSTEM_CALL              0x5008
@@ -413,21 +413,19 @@ typedef struct
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
  */
-#define PTRACE_PROT                       21
+#define PTRACE_PROT                      21
 
-typedef enum
-{
+typedef enum {
        ptprot_set_access_watchpoint,
        ptprot_set_write_watchpoint,
        ptprot_disable_watchpoint
 } ptprot_flags;
 
-typedef struct
-{
+typedef struct {
        unsigned long lowaddr;
        unsigned long hiaddr;
        ptprot_flags prot;
-} ptprot_area;                     
+} ptprot_area;
 
 /* Sequence of bytes for breakpoint illegal instruction.  */
 #define S390_BREAKPOINT     {0x0,0x1}
@@ -439,8 +437,7 @@ typedef struct
  * The user_regs_struct defines the way the user registers are
  * store on the stack for signal handling.
  */
-struct user_regs_struct
-{
+struct user_regs_struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
index 967aad39010515cd9614e51529517712630bbf4e..3a77833c74dc20e065e0bc3d77ce0bda9f955fa5 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright IBM Corp. 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_VIRTIO_CCW_H
index 137ef473584ee5e2c6e42f70857cadd4e90df3b6..d568307321fcc54f51c1b3609dffc365cdc3a65d 100644 (file)
@@ -9,20 +9,6 @@
  *            Eric Rossman (edrossma@us.ibm.com)
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __ASM_S390_ZCRYPT_H
index 58b9e127b61517c3f1cbe41a76757f7db156df76..80e974adb9e8be39d4ab558495eb69d110a2b3a3 100644 (file)
@@ -1392,7 +1392,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
        else
                except_str = "-";
        caller = (unsigned long) entry->caller;
-       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p  ",
+       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
                      area, sec, usec, level, except_str,
                      entry->id.fields.cpuid, (void *)caller);
        return rc;
index 3be829721cf948adc5349a342f04073a90dcf20e..b2c68fbf26346a3e9d6e626333a34c7c374bef31 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Disassemble s390 instructions.
  *
@@ -396,9 +397,14 @@ struct s390_insn *find_insn(unsigned char *code)
        unsigned char opfrag;
        int i;
 
+       /* Search the opcode offset table to find an entry which
+        * matches the beginning of the opcode. If there is no match
+        * the last entry will be used, which is the default entry for
+        * unknown instructions as well as 1-byte opcode instructions.
+        */
        for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
                entry = &opcode_offset[i];
-               if (entry->opcode == code[0] || entry->opcode == 0)
+               if (entry->opcode == code[0])
                        break;
        }
 
@@ -543,7 +549,7 @@ void show_code(struct pt_regs *regs)
                start += opsize;
                pr_cont("%s", buffer);
                ptr = buffer;
-               ptr += sprintf(ptr, "\n\t  ");
+               ptr += sprintf(ptr, "\n          ");
                hops++;
        }
        pr_cont("\n");
index 2aa545dca4d53c1c5b2bb7c6bdc18db10fd37930..5b23c4f6e50cd452177477105b914774d67898ad 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack dumping functions
  *
index a316cd6999ad9712defdf46db85e16eb429aebcb..9e5f6cd8e4c2e443a2c7fb46792157933447d170 100644 (file)
@@ -180,18 +180,17 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       lgr     %r1,%r2
-       aghi    %r1,__TASK_thread               # thread_struct of prev task
-       lg      %r5,__TASK_stack(%r3)           # start of kernel stack of next
-       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
-       lgr     %r1,%r3
-       aghi    %r1,__TASK_thread               # thread_struct of next task
+       lghi    %r4,__TASK_stack
+       lghi    %r1,__TASK_thread
+       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
-       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
-       mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+       lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
+       aghi    %r3,__TASK_pid
+       mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
        bzr     %r14
index 310e59e6eb4b20bb7f17debb8ec412546289baf8..8ecb8726ac4762582a6ced188444ed14ea48cf46 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    ipl/reipl/dump support for Linux on s390.
  *
index 1a6521af17514a722c02e1b032fe3514d6857a15..af3722c28fd961283ff31c578d5329b4ee74fa60 100644 (file)
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
index bf9622f0e6b16aa291037d880f8ffe3ec75c1046..452502f9a0d986d4f8f97295143146cc26569c30 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux Guest Relocation (LGR) detection
  *
index 7b87991416fd6d882e7edf3f52b6f4af6fc45ad3..b7abfad4fd7df5583b19867dda1f81d22b81dcaa 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel module help for s390.
  *
@@ -8,20 +9,6 @@
  *
  *  based on i386 version
  *    Copyright (C) 2001 Rusty Russell.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/elf.h>
index 6ff169253caeea0be88da521b84b796a72accb6e..c7a627620e5ebc4a7050167f15201abcad3eaf6e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   Machine check handler
  *
index 746d034233336f3804923a9340288d0f595f0211..cc085e2d2ce9907690fbe0912dd301ab44e8171d 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
  *  Copyright IBM Corp. 2012, 2017
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_cf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 227b38bd82c94f211392348ec03dd146549d19c4..1c9ddd7aa5ec8fd32ee626d036a3c3ea6ed79362 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for the System z CPU-measurement Sampling Facility
  *
  * Copyright IBM Corp. 2013
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_sf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 93a386f4a3b5a4533e9b0d52c1db2548a72e20ef..0d770e513abf404ff2cea26f7c01931b4d60cf4b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x
  *
  *  Copyright IBM Corp. 2012, 2013
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "perf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 26c0523c14882d1b967ef8cb81a5cb6b387017a2..cd3df5514552cc262dee1d1b99260d0ee089668b 100644 (file)
@@ -1650,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
+       {
+               .core_note_type = NT_S390_GS_BC,
+               .n = sizeof(struct gs_cb) / sizeof(__u64),
+               .size = sizeof(__u64),
+               .align = sizeof(__u64),
+               .get = s390_gs_bc_get,
+               .set = s390_gs_bc_set,
+       },
        {
                .core_note_type = NT_S390_RI_CB,
                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
index 090053cf279bb1d7077082dcc1a90ababb716478..793da97f9a6e53716e415bbc5e68cf844ce4cb1d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999, 2012
index cd4334e80b64cdb7908842f44015183be17c62f2..b8c1a85bcf2de75eccba0b5b86eb2870bd4b71b6 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/sigp.h>
 #include <asm/idle.h>
 #include <asm/nmi.h>
+#include <asm/topology.h>
 #include "entry.h"
 
 enum {
index e66687dc61446dc929c4450a7c887740dca11595..460dcfba7d4ec08db7de61942ea387ef38579a99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack trace management functions
  *
index 12981e197f0125dcaea9f0bcb13f49207be00cec..80b862e9c53c6b108e611935ab8dd7c9b794ff13 100644 (file)
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * store hypervisor information instruction emulation functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Copyright IBM Corp. 2016
  * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
  */
index be6198193ec29fc4a78c83c2c4f271e51a0693db..cf561160ea887f9b6395e1d5ec30f9ee02aba77d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Time of day based timer functions.
  *
index f9b393d4a078365ff8f83db8feaae839a20e0274..4d5b65e527b5495f17598ea20eb22bfb2c5ff754 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
index 39a218703c50add3defe9aa2ce5a85f88188e776..f3a1c7c6824ef0da8933fbb64864fdf5b97bd99a 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * vdso setup for s390
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #include <linux/init.h>
index eca3f001f081309c88372de459a6dfcccd754e7d..f61df5253c23c55ed26b357a645b16f4ab2e26a4 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index a5769b83d90e687f08175af96ded9a4893ebd2d2..2d6ec3abe095ea30bac898e85fd4cf4faf06bbe5 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 63b86dceb0bfec0f72886d96230e5248620f8728..aa8bf13a2edb1f77c861ed6de0acc2a2836e26fa 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index c8513deb8c663f51fa4f4bc2b2299ea24f1aaefd..faf5213b15dfae9536f2583b601c1b4610558249 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 5d7b56b49458d03ba885f105a9c07bcdecea019a..6046b3bfca4622ea87bd1759bfa36e3f2d5b6d89 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index b02e62f3bc12d4ffb3850aff587d20ad944f4688..cc9dbc27da6fbcd22e865dd502be7f8bd515a11c 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index dd7178fbb4f3bd3f32955eeacf6e640f371e06a3..f24395a0191828ec7a638042632c69d4f4fcb939 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Virtual cpu timer based timer functions.
  *
index 98ad8b9e036093c8a784cfc0dfd3887e925c6357..9614aea5839b6ecf1c36e2ccbbd64e2592621dd9 100644 (file)
@@ -3372,7 +3372,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int rc;
-       sigset_t sigsaved;
 
        if (kvm_run->immediate_exit)
                return -EINTR;
@@ -3382,8 +3381,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 0;
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
@@ -3417,8 +3415,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        vcpu->stat.exit_userspace++;
        return rc;
index 3d017171ff8f781f960443e8f7c8783bed49e6c0..6cf024eb2085d86e6a729e3cfa94e80a50f02a38 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Collaborative memory management interface.
  *
index b2c140193b0af72273ffcbafd78a4ee38417ca42..05d459b638f55d563d479eb978a5d60f0e421e1b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  KVM guest address space mapping code
  *
index 5bea139517a2edc21dc50074d2c2e9a94dabb19e..831bdcf407bbc1d2d76edc78e6a9f50aae1406cb 100644 (file)
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  flexible mmap layout support
  *
  * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
  * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
index ae677f814bc07a406f7f996a81ed2db65718f5ff..4f2b65d01a70418c802d6ce33713101ec0e2909b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
index 0fe649c0d5423a2ed51fcff4dc7d011204fdf4e9..4902fed221c0effa59ff21fedabdda152641d112 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index c2f786f0ea0688c5fb9c36659ff66fb8140e6f2b..b482e95b6249e380dfb39d89253789c61dedb1e3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012,2015
  *
index 0d300ee00f4e95b987884bbeb0fa1fce1bee7b59..f7aa5a77827ec17d893d59834a0d33082bb8fd82 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index 81b840bc6e4e733064d20f309bb57f24820f01cb..19bcb3b45a70fc12fa426d636fd4482c570c6654 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 specific pci instructions
  *
index c94ee54210bc489efd469493800596cd2b7061a3..81271d3af47cb1000ebfab2539efa92080a1eccc 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index e28531333efa96d7195e1e9771d574c83caa040d..ba4d39cb321d0608d96f0b0c5ea45ddadea7cc45 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 5a9e96be16652bc13bb4e6cd0f298b0e613d5883..9937c5ff94a9fe9eaeb8744ee2786ad7a5a2afa2 100644 (file)
@@ -715,7 +715,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return pte_pfn(pte);
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline unsigned long pmd_write(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
index 2178c78c7c1a6336d4a11c9619de521519245c0e..4680ba246b554708aec94287f7974adcca4c8c97 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += types.h
index 5335ba3c850ed3acdc074ffe639d3ddac101f2ad..33c0f8bb0f33de0c6beadd3dd8a9bef6253bcb10 100644 (file)
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;
 
-       if (write && !pmd_write(pmd))
+       if (!pmd_access_permitted(pmd, write))
                return 0;
 
        refs = 0;
@@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
        if (!(pud_val(pud) & _PAGE_VALID))
                return 0;
 
-       if (write && !pud_write(pud))
+       if (!pud_access_permitted(pud, write))
                return 0;
 
        refs = 0;
index 2a26cc4fefc27fda65d15be3b3a0b719231ad609..adfa21b18488f215b7b56cfc43daa52877c77dd3 100644 (file)
@@ -475,7 +475,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
 #define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
 
 #define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
index 5711de0a1b5efc92519e152462a0ef1516612add..cc439612bcd52fee78256802f5aac1ded0c37ec8 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 759a71411169f4df318e3eb3e97e0f7602a04bb0..8611ef980554c2ef81378a087d15d42f1e3f3acb 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 1bfb99770c34197b6c0627897753d282b3e5c378..977de5fb968be412862de87aeb95136bae69e620 100644 (file)
@@ -1161,7 +1161,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
 {
-       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+       return x86_emulate_instruction(vcpu, 0,
+                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
 }
 
 void kvm_enable_efer_bits(u64);
index 09f9e1e00e3bd30b5869b126f2ab11be49388f05..95e2dfd755218ccfaf6417b44c822b545a35568e 100644 (file)
@@ -1061,7 +1061,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return pmd_flags(pmd) & _PAGE_RW;
@@ -1088,6 +1088,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_RW;
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
index da1489cb64dce5fcec622f2869cd26d2b58d00ac..1e901e421f2db09d4a0c2c543f47b0e5bbc8be62 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generated-y += unistd_32.h
 generated-y += unistd_64.h
 generated-y += unistd_x32.h
index cdc70a3a65838b10d558c3d0b14bcdf4d9e996d2..c2cea6651279f706f488cf51a523301e6de4ae77 100644 (file)
@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
-       [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+       [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
index 8079d141792af91994421d15c19c26d3bd386c59..e7d04d0c8008d1a1d69966105ad855351a1f474f 100644 (file)
@@ -4014,6 +4014,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
                                   fxstate_size(ctxt));
 }
 
+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline to keep the stack for other functions called by callers small.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+                                const size_t used_size)
+{
+       struct fxregs_state fx_tmp;
+       int rc;
+
+       rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+       memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+              __fxstate_size(16) - used_size);
+
+       return rc;
+}
+
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
        struct fxregs_state fx_state;
@@ -4024,19 +4044,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
+       size = fxstate_size(ctxt);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
        ctxt->ops->get_fpu(ctxt);
 
-       size = fxstate_size(ctxt);
        if (size < __fxstate_size(16)) {
-               rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+               rc = fxregs_fixup(&fx_state, size);
                if (rc != X86EMUL_CONTINUE)
                        goto out;
        }
 
-       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-       if (rc != X86EMUL_CONTINUE)
-               goto out;
-
        if (fx_state.mxcsr >> 16) {
                rc = emulate_gp(ctxt, 0);
                goto out;
@@ -5000,6 +5020,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;
+       u16 dummy;
+       struct desc_struct desc;
 
        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
@@ -5018,6 +5040,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
+               def_op_bytes = def_ad_bytes = 2;
+               ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+               if (desc.d)
+                       def_op_bytes = def_ad_bytes = 4;
+               break;
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
index bdff437acbcb7ebc3307523edd848fb7db009c39..4e822ad363f37f613d14ab94f35609bcf3539bf7 100644 (file)
@@ -209,12 +209,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 
        old_irr = ioapic->irr;
        ioapic->irr |= mask;
-       if (edge)
+       if (edge) {
                ioapic->irr_delivered &= ~mask;
-       if ((edge && old_irr == ioapic->irr) ||
-           (!edge && entry.fields.remote_irr)) {
-               ret = 0;
-               goto out;
+               if (old_irr == ioapic->irr) {
+                       ret = 0;
+                       goto out;
+               }
        }
 
        ret = ioapic_service(ioapic, irq, line_status);
@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                     e->fields.dest_id, e->fields.dest_mode) ||
-                           (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
-                            kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+                           kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
        unsigned index;
        bool mask_before, mask_after;
+       int old_remote_irr, old_delivery_status;
        union kvm_ioapic_redirect_entry *e;
 
        switch (ioapic->ioregsel) {
@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        return;
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
+               /* Preserve read-only fields */
+               old_remote_irr = e->fields.remote_irr;
+               old_delivery_status = e->fields.delivery_status;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
-                       e->fields.remote_irr = 0;
                }
+               e->fields.remote_irr = old_remote_irr;
+               e->fields.delivery_status = old_delivery_status;
+
+               /*
+                * Some OSes (Linux, Xen) assume that Remote IRR bit will
+                * be cleared by IOAPIC hardware when the entry is configured
+                * as edge-triggered. This behavior is used to simulate an
+                * explicit EOI on IOAPICs that don't have the EOI register.
+                */
+               if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+                       e->fields.remote_irr = 0;
+
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
@@ -324,7 +338,9 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
        struct kvm_lapic_irq irqe;
        int ret;
 
-       if (entry->fields.mask)
+       if (entry->fields.mask ||
+           (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+           entry->fields.remote_irr))
                return -1;
 
        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
index 943acbf00c69d8f423289116bc363159144f883a..e2c1fb8d35cea28af684d4ba76d70a5e2e12e9a5 100644 (file)
@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
        recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+       return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-       u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+       u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 
@@ -2245,6 +2250,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
        if (apic_x2apic_mode(vcpu->arch.apic)) {
                u32 *id = (u32 *)(s->regs + APIC_ID);
+               u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
                if (vcpu->kvm->arch.x2apic_format) {
                        if (*id != vcpu->vcpu_id)
@@ -2255,6 +2261,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                        else
                                *id <<= 24;
                }
+
+               /* In x2APIC mode, the LDR is fixed and based on the id */
+               if (set)
+                       *ldr = kvm_apic_calc_x2apic_ldr(*id);
        }
 
        return 0;
index 59e13a79c2e3eea5588be38d3fdb677cf9076b3d..eb714f1cdf7eee4ca9036005c3ab72ef9228ae9b 100644 (file)
@@ -361,6 +361,7 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *c, *h;
        struct nested_state *g;
+       u32 h_intercept_exceptions;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -371,9 +372,14 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        h = &svm->nested.hsave->control;
        g = &svm->nested;
 
+       /* No need to intercept #UD if L1 doesn't intercept it */
+       h_intercept_exceptions =
+               h->intercept_exceptions & ~(1U << UD_VECTOR);
+
        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
-       c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+       c->intercept_exceptions =
+               h_intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
 }
 
@@ -2196,7 +2202,10 @@ static int ud_interception(struct vcpu_svm *svm)
 {
        int er;
 
+       WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
        er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+       if (er == EMULATE_USER_EXIT)
+               return 0;
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
index 714a0673ec3cb2153dce2f192f412fc14e0d19df..4704aaf6d19e2ea36e3d12ddc0c345bc8ed3632f 100644 (file)
@@ -1887,7 +1887,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+       eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
             (1u << DB_VECTOR) | (1u << AC_VECTOR);
        if ((vcpu->guest_debug &
             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,6 +1905,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         */
        if (is_guest_mode(vcpu))
                eb |= get_vmcs12(vcpu)->exception_bitmap;
+       else
+               eb |= 1u << UD_VECTOR;
 
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -5600,7 +5602,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
        }
 
-       vmcs_writel(GUEST_RFLAGS, 0x02);
+       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
        vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5915,11 +5917,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_invalid_opcode(intr_info)) {
-               if (is_guest_mode(vcpu)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
+               WARN_ON_ONCE(is_guest_mode(vcpu));
                er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+               if (er == EMULATE_USER_EXIT)
+                       return 0;
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -6602,7 +6603,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+               err = emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -7414,10 +7415,11 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
  */
 static void free_nested(struct vcpu_vmx *vmx)
 {
-       if (!vmx->nested.vmxon)
+       if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
        vmx->nested.vmxon = false;
+       vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -9800,8 +9802,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
        cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
        cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
-       /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
-       cr4_fixed1_update(bit(11),            ecx, bit(2));
+       cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
 
 #undef cr4_fixed1_update
 }
@@ -10875,6 +10876,11 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                        return 1;
        }
 
+       if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+               (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+               (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+                       return 1;
+
        return 0;
 }
 
@@ -11099,13 +11105,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
-
-       if (kvm_event_needs_reinjection(vcpu))
-               return -EBUSY;
+       bool block_nested_events =
+           vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
 
        if (vcpu->arch.exception.pending &&
                nested_vmx_check_exception(vcpu, &exit_qual)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
                vcpu->arch.exception.pending = false;
@@ -11114,14 +11119,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
            vmx->nested.preemption_timer_expired) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
                return 0;
        }
 
        if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  NMI_VECTOR | INTR_TYPE_NMI_INTR |
@@ -11137,7 +11142,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
            nested_exit_on_intr(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
                return 0;
@@ -11324,6 +11329,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        kvm_clear_interrupt_queue(vcpu);
 }
 
+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+                       struct vmcs12 *vmcs12)
+{
+       u32 entry_failure_code;
+
+       nested_ept_uninit_mmu_context(vcpu);
+
+       /*
+        * Only PDPTE load can fail as the value of cr3 was checked on entry and
+        * couldn't have changed.
+        */
+       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+       if (!enable_ept)
+               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
 /*
  * A part of what we need to when the nested L2 guest exits and we want to
  * run its L1 parent, is to reset L1's guest state to the host state specified
@@ -11337,7 +11360,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                   struct vmcs12 *vmcs12)
 {
        struct kvm_segment seg;
-       u32 entry_failure_code;
 
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -11364,17 +11386,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
        vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
-       nested_ept_uninit_mmu_context(vcpu);
-
-       /*
-        * Only PDPTE load can fail as the value of cr3 was checked on entry and
-        * couldn't have changed.
-        */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
-               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
-       if (!enable_ept)
-               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
 
        if (enable_vpid) {
                /*
@@ -11604,6 +11616,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         * accordingly.
         */
        nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
        /*
         * The emulated instruction was already skipped in
         * nested_vmx_run, but the updated RIP was never
index 34c85aa2e2d1d40ffc65f461d45b50d8666b5491..eee8e7faf1af5778763b8181df472651d3573149 100644 (file)
@@ -107,6 +107,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly report_ignored_msrs = true;
+module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+
 unsigned int min_timer_period_us = 500;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 
@@ -1795,10 +1798,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
 
-       kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
-                          &hv_clock.tsc_shift,
-                          &hv_clock.tsc_to_system_mul);
-       ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       if (__this_cpu_read(cpu_tsc_khz)) {
+               kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+                                  &hv_clock.tsc_shift,
+                                  &hv_clock.tsc_to_system_mul);
+               ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       } else
+               ret = ktime_get_boot_ns() + ka->kvmclock_offset;
 
        put_cpu();
 
@@ -1830,6 +1836,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
         */
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+       if (guest_hv_clock.version & 1)
+               ++guest_hv_clock.version;  /* first time write, random junk */
+
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
                                &vcpu->hv_clock,
@@ -2322,7 +2331,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
                 */
-               vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
+               if (report_ignored_msrs)
+                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
+                               msr, data);
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
@@ -2359,8 +2370,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    msr, data);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
-                                   msr, data);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu,
+                                       "ignored wrmsr: 0x%x data 0x%llx\n",
+                                       msr, data);
                        break;
                }
        }
@@ -2578,7 +2591,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                               msr_info->index);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
+                                       msr_info->index);
                        msr_info->data = 0;
                }
                break;
@@ -5430,7 +5445,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
-               r = EMULATE_FAIL;
+               r = EMULATE_USER_EXIT;
        }
        kvm_queue_exception(vcpu, UD_VECTOR);
 
@@ -5722,6 +5737,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                        if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
+                       if (ctxt->have_exception && inject_emulated_exception(vcpu))
+                               return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
                        return handle_emulation_failure(vcpu);
@@ -7250,12 +7267,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct fpu *fpu = &current->thread.fpu;
        int r;
-       sigset_t sigsaved;
 
        fpu__initialize(fpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
@@ -7298,8 +7313,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 out:
        post_kvm_run_save(vcpu);
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index a5bcdfb890f1b77fa8f5f2b349a05d7f759fb661..837d4dd7678545dec75f53c21bcf5e2dea500202 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 228229f3bb76d2f9770b2191cee67328b6f9e983..8bfdea58159ba9ffd972dd95717e0eee99101e0a 100644 (file)
@@ -1819,7 +1819,7 @@ EXPORT_SYMBOL(bio_endio);
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
 {
-       struct bio *split = NULL;
+       struct bio *split;
 
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
index e54be402899daa18dc45ef35130d7e191d459bb0..870484eaed1f64b586ccb0fd4dd34c3f9462a29d 100644 (file)
@@ -450,12 +450,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                ret = wbt_init(q);
                if (ret)
                        return ret;
-
-               rwb = q->rq_wb;
-               if (!rwb)
-                       return -EINVAL;
        }
 
+       rwb = q->rq_wb;
        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
index b252da0e4c11051f7c78be797122448e231a0bde..ae8de9780085ae7b8e99237ed16fc9cd02b233a5 100644 (file)
@@ -178,12 +178,11 @@ void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
 
                if (wbt_is_read(stat))
                        wb_timestamp(rwb, &rwb->last_comp);
-               wbt_clear_state(stat);
        } else {
                WARN_ON_ONCE(stat == rwb->sync_cookie);
                __wbt_done(rwb, wbt_stat_to_mask(stat));
-               wbt_clear_state(stat);
        }
+       wbt_clear_state(stat);
 }
 
 /*
@@ -482,7 +481,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 
        /*
         * At this point we know it's a buffered write. If this is
-        * kswapd trying to free memory, or REQ_SYNC is set, set, then
+        * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
@@ -723,8 +722,6 @@ int wbt_init(struct request_queue *q)
                init_waitqueue_head(&rwb->rq_wait[i].wait);
        }
 
-       rwb->wc = 1;
-       rwb->queue_depth = RWB_DEF_DEPTH;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->queue = q;
        rwb->win_nsec = RWB_WINDOW_NSEC;
index c2223f12a8051411d4e89a0bc2850e03d7d0427e..96a66f67172045d571be9fe18248dbefc99766ef 100644 (file)
@@ -671,10 +671,13 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                disk->flags |= GENHD_FL_NO_PART_SCAN;
        } else {
+               int ret;
+
                /* Register BDI before referencing it from bdev */
                disk_to_dev(disk)->devt = devt;
-               bdi_register_owner(disk->queue->backing_dev_info,
-                               disk_to_dev(disk));
+               ret = bdi_register_owner(disk->queue->backing_dev_info,
+                                               disk_to_dev(disk));
+               WARN_ON(ret);
                blk_register_region(disk_devt(disk), disk->minors, NULL,
                                    exact_match, exact_lock, disk);
        }
@@ -1389,7 +1392,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 
        if (minors > DISK_MAX_PARTS) {
                printk(KERN_ERR
-                       "block: can't allocated more than %d partitions\n",
+                       "block: can't allocate more than %d partitions\n",
                        DISK_MAX_PARTS);
                minors = DISK_MAX_PARTS;
        }
index 24418932612eeabc62d72185df50980ed55ee5bd..a041689e5701d7e1ed2a2677a30982827ee2276a 100644 (file)
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
        int count;
        struct acpi_hardware_id *id;
 
+       /* Avoid unnecessarily loading modules for non present devices. */
+       if (!acpi_device_is_present(acpi_dev))
+               return 0;
+
        /*
         * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
         * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
index da176c95aa2cb3a4fd89e7d0b100d0ff7d161405..0252c9b9af3d3a099ff955b9deea730bf270a098 100644 (file)
@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
 {
        struct acpi_ec *ec = NULL;
        int ret;
+       bool is_ecdt = false;
+       acpi_status status;
 
        strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
-       ec = acpi_ec_alloc();
-       if (!ec)
-               return -ENOMEM;
-       if (ec_parse_device(device->handle, 0, ec, NULL) !=
-               AE_CTRL_TERMINATE) {
+       if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+               is_ecdt = true;
+               ec = boot_ec;
+       } else {
+               ec = acpi_ec_alloc();
+               if (!ec)
+                       return -ENOMEM;
+               status = ec_parse_device(device->handle, 0, ec, NULL);
+               if (status != AE_CTRL_TERMINATE) {
                        ret = -EINVAL;
                        goto err_alloc;
+               }
        }
 
        if (acpi_is_boot_ec(ec)) {
-               boot_ec_is_ecdt = false;
-               /*
-                * Trust PNP0C09 namespace location rather than ECDT ID.
-                *
-                * But trust ECDT GPE rather than _GPE because of ASUS quirks,
-                * so do not change boot_ec->gpe to ec->gpe.
-                */
-               boot_ec->handle = ec->handle;
-               acpi_handle_debug(ec->handle, "duplicated.\n");
-               acpi_ec_free(ec);
-               ec = boot_ec;
-               ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+               boot_ec_is_ecdt = is_ecdt;
+               if (!is_ecdt) {
+                       /*
+                        * Trust PNP0C09 namespace location rather than
+                        * ECDT ID. But trust ECDT GPE rather than _GPE
+                        * because of ASUS quirks, so do not change
+                        * boot_ec->gpe to ec->gpe.
+                        */
+                       boot_ec->handle = ec->handle;
+                       acpi_handle_debug(ec->handle, "duplicated.\n");
+                       acpi_ec_free(ec);
+                       ec = boot_ec;
+               }
+               ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
        } else
                ret = acpi_ec_setup(ec, true);
        if (ret)
@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-       /* Reprobe devices depending on the EC */
-       acpi_walk_dep_device_list(ec->handle);
+       if (!is_ecdt) {
+               /* Reprobe devices depending on the EC */
+               acpi_walk_dep_device_list(ec->handle);
+       }
        acpi_handle_debug(ec->handle, "enumerated.\n");
        return 0;
 
@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 
 static const struct acpi_device_id ec_device_ids[] = {
        {"PNP0C09", 0},
+       {ACPI_ECDT_HID, 0},
        {"", 0},
 };
 
@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
         * Note: ec->handle can be valid if this function is called after
         * acpi_ec_add(), hence the fast path.
         */
-       if (boot_ec->handle != ACPI_ROOT_OBJECT)
-               handle = boot_ec->handle;
-       else if (!acpi_ec_ecdt_get_handle(&handle))
-               return -ENODEV;
-       return acpi_config_boot_ec(boot_ec, handle, true, true);
+       if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+               if (!acpi_ec_ecdt_get_handle(&handle))
+                       return -ENODEV;
+               boot_ec->handle = handle;
+       }
+
+       /* Register to ACPI bus with PM ops attached */
+       return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
 }
 
 #if 0
@@ -2022,6 +2037,12 @@ int __init acpi_ec_init(void)
 
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+       /*
+        * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+        * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+        * settings but invalid DSDT settings.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+        */
        ecdt_fail = acpi_ec_ecdt_start();
        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
index fc8c43e767074c177c56c1c97adc6a3a077d5336..7f43423de43cebdd0788f2a9d75eed24722bc1a7 100644 (file)
@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
                                        const struct device *dev);
+int acpi_bus_register_early_device(int type);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification
index e14e964bfe6d79ad0a686f1769181f7cff527b27..b0fe5272c76aadfa59493bc954c6a545bbbc2008 100644 (file)
@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                strcpy(device->pnp.bus_id, "SLPF");
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               strcpy(device->pnp.bus_id, "ECDT");
+               break;
        default:
                acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
                /* Clean up trailing underscores (if any) */
@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               acpi_add_id(pnp, ACPI_ECDT_HID);
+               break;
        }
 }
 
@@ -2046,6 +2052,21 @@ void acpi_bus_trim(struct acpi_device *adev)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_trim);
 
+int acpi_bus_register_early_device(int type)
+{
+       struct acpi_device *device = NULL;
+       int result;
+
+       result = acpi_add_single_object(&device, NULL,
+                                       type, ACPI_STA_DEFAULT);
+       if (result)
+               return result;
+
+       device->flags.match_driver = true;
+       return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
 static int acpi_bus_scan_fixed(void)
 {
        int result = 0;
index c61960deb74aac4277d7994f8020fc8f7a65d3bd..ccb9975a97fa3f214d658776450ab618bae26643 100644 (file)
@@ -471,7 +471,6 @@ static void nullb_device_release(struct config_item *item)
 {
        struct nullb_device *dev = to_nullb_device(item);
 
-       badblocks_exit(&dev->badblocks);
        null_free_device_storage(dev, false);
        null_free_dev(dev);
 }
@@ -582,6 +581,10 @@ static struct nullb_device *null_alloc_dev(void)
 
 static void null_free_dev(struct nullb_device *dev)
 {
+       if (!dev)
+               return;
+
+       badblocks_exit(&dev->badblocks);
        kfree(dev);
 }
 
index 4ebae43118effe98f4763618cbd0777060e8e134..d8addbce40bcc4f9c6a29e32c98cd0c15bac15b4 100644 (file)
@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
+       depends on LEMOTE_MACH2F
        help
          This option adds a CPUFreq driver for loongson processors which
          support software configurable cpu frequency.
@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
 
 config LOONGSON1_CPUFREQ
        tristate "Loongson1 CPUFreq Driver"
+       depends on LOONGSON1_LS1B
        help
          This option adds a CPUFreq driver for loongson1 processors which
          support software configurable cpu frequency.
index 18c4bd9a5c6564776c7ac5f35e259daae5662f48..e0d5090b303dd3840ddb2a53d2481d6ba6bacf50 100644 (file)
@@ -620,3 +620,7 @@ static int __init mtk_cpufreq_driver_init(void)
        return 0;
 }
 device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
index 6833ada237ab7d94540671d811f1dbcde2bb59db..7b0bf825c4e73c588ff93183cf5315665d69e082 100644 (file)
@@ -428,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       struct file *filp = vma->vm_file;
+       struct dev_dax *dev_dax = filp->private_data;
+       struct dax_region *dax_region = dev_dax->region;
+
+       if (!IS_ALIGNED(addr, dax_region->align))
+               return -EINVAL;
+       return 0;
+}
+
 static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
+       .split = dev_dax_split,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
index 5cfe39f7a45f080f56f36eea6259ec4c1b1df8b6..deb483064f53c3e680d34b655360faac04853c3f 100644 (file)
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
 {
        pr_debug("fw_cfg: unloading.\n");
        fw_cfg_sysfs_cache_cleanup();
+       sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+       fw_cfg_io_cleanup();
        fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
        fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
-       fw_cfg_io_cleanup();
        return 0;
 }
 
index 5afaf6016b4a654f552c5a7439bfac8e3425ddc6..0b14b537378345870f7aec058b791add80f8fa95 100644 (file)
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
        /* sdma */
        struct amdgpu_sdma              sdma;
 
-       union {
-               struct {
-                       /* uvd */
-                       struct amdgpu_uvd               uvd;
+       /* uvd */
+       struct amdgpu_uvd               uvd;
 
-                       /* vce */
-                       struct amdgpu_vce               vce;
-               };
+       /* vce */
+       struct amdgpu_vce               vce;
 
-               /* vcn */
-               struct amdgpu_vcn               vcn;
-       };
+       /* vcn */
+       struct amdgpu_vcn               vcn;
 
        /* firmwares */
        struct amdgpu_firmware          firmware;
index 47d1c132ac40b24c719c5801c84f2a55b4c598e2..1e3e9be7d77ecf29883cf0ec5d5874f0cb67bd64 100644 (file)
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
+       unsigned long end_jiffies;
        uint32_t sdma_base_addr;
+       uint32_t data;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-                       m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-                       m->sdma_rlc_rb_base);
+       end_jiffies = msecs_to_jiffies(2000) + jiffies;
+       while (true) {
+               data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+               if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+                       break;
+               if (time_after(jiffies, end_jiffies))
+                       return -ETIME;
+               usleep_range(500, 1000);
+       }
+       if (m->sdma_engine_id) {
+               data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+       } else {
+               data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+       }
 
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+                               m->sdma_rlc_doorbell);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+                               m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);
-
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-                       m->sdma_rlc_doorbell);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                        m->sdma_rlc_rb_cntl);
 
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
        }
 
        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+               SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
        return 0;
 }
index a57cec737c18ab1b8db405607042a4363493205b..57abf7abd7a9cda177e9e82c6c5e42d3dc759f6d 100644 (file)
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                if (candidate->robj == validated)
                        break;
 
+               /* We can't move pinned BOs here */
+               if (bo->pin_count)
+                       continue;
+
                other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
                /* Check if this BO is in one of the domains we need space for */
index 2c574374d9b6884e6c4473f2dd3ede86b7a612df..3573ecdb06eeff06d2f1507cdadd4f0b05ba9435 100644 (file)
@@ -1837,9 +1837,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.hw = false;
        }
 
-       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-               amdgpu_ucode_fini_bo(adev);
-
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
index ec96bb1f9eafbc374cdad09c85f96da8e8d1bbad..c2f414ffb2cc205c40873afaabeee821ffa816e6 100644 (file)
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Raven */
-       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
        {0, 0, 0}
 };
index 033fba2def6f775b35f4f840f7032477c4403ad3..5f5aa5fddc169355077a4e61665563c087d860f5 100644 (file)
@@ -164,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
                ret = adev->powerplay.ip_funcs->hw_fini(
                                        adev->powerplay.pp_handle);
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_fini_bo(adev);
+
        return ret;
 }
 
index 7714f4a6c8b000072c2b7c3d8691884b3d902a35..447d446b50150d475cb9a01945706b17bbfc2e78 100644 (file)
@@ -442,6 +442,8 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       amdgpu_ucode_fini_bo(adev);
+
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
index 190e28cb827e535d247377e731507a82bc20dc3a..93d86619e802c998636d61f36f2daf2e2d5d1c34 100644 (file)
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
                               struct amdgpu_queue_mapper *mapper,
-                              int ring,
+                              u32 ring,
                               struct amdgpu_ring **out_ring)
 {
        switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
                          struct amdgpu_queue_mapper *mapper,
-                         int user_ring, bool lru_pipe_order,
+                         u32 user_ring, bool lru_pipe_order,
                          struct amdgpu_ring **out_ring)
 {
        int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
 {
        int r, ip_num_rings;
index 793b1470284d67b13c68954481520f7ab3ef635e..a296f7bbe57cbb0c768499a6c254e433d931773a 100644 (file)
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
        {mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-                                         u32 se_num, u32 sh_num,
-                                         u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+                                      bool indexed, u32 se_num,
+                                      u32 sh_num, u32 reg_offset)
 {
-       uint32_t val;
+       if (indexed) {
+               uint32_t val;
+               unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+               unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+               switch (reg_offset) {
+               case mmCC_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+               case mmGC_USER_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+               case mmPA_SC_RASTER_CONFIG:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+               case mmPA_SC_RASTER_CONFIG_1:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+               }
 
-       mutex_lock(&adev->grbm_idx_mutex);
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+               mutex_lock(&adev->grbm_idx_mutex);
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-       val = RREG32(reg_offset);
+               val = RREG32(reg_offset);
 
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
-       return val;
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+               return val;
+       } else {
+               unsigned idx;
+
+               switch (reg_offset) {
+               case mmGB_ADDR_CONFIG:
+                       return adev->gfx.config.gb_addr_config;
+               case mmMC_ARB_RAMCFG:
+                       return adev->gfx.config.mc_arb_ramcfg;
+               case mmGB_TILE_MODE0:
+               case mmGB_TILE_MODE1:
+               case mmGB_TILE_MODE2:
+               case mmGB_TILE_MODE3:
+               case mmGB_TILE_MODE4:
+               case mmGB_TILE_MODE5:
+               case mmGB_TILE_MODE6:
+               case mmGB_TILE_MODE7:
+               case mmGB_TILE_MODE8:
+               case mmGB_TILE_MODE9:
+               case mmGB_TILE_MODE10:
+               case mmGB_TILE_MODE11:
+               case mmGB_TILE_MODE12:
+               case mmGB_TILE_MODE13:
+               case mmGB_TILE_MODE14:
+               case mmGB_TILE_MODE15:
+               case mmGB_TILE_MODE16:
+               case mmGB_TILE_MODE17:
+               case mmGB_TILE_MODE18:
+               case mmGB_TILE_MODE19:
+               case mmGB_TILE_MODE20:
+               case mmGB_TILE_MODE21:
+               case mmGB_TILE_MODE22:
+               case mmGB_TILE_MODE23:
+               case mmGB_TILE_MODE24:
+               case mmGB_TILE_MODE25:
+               case mmGB_TILE_MODE26:
+               case mmGB_TILE_MODE27:
+               case mmGB_TILE_MODE28:
+               case mmGB_TILE_MODE29:
+               case mmGB_TILE_MODE30:
+               case mmGB_TILE_MODE31:
+                       idx = (reg_offset - mmGB_TILE_MODE0);
+                       return adev->gfx.config.tile_mode_array[idx];
+               case mmGB_MACROTILE_MODE0:
+               case mmGB_MACROTILE_MODE1:
+               case mmGB_MACROTILE_MODE2:
+               case mmGB_MACROTILE_MODE3:
+               case mmGB_MACROTILE_MODE4:
+               case mmGB_MACROTILE_MODE5:
+               case mmGB_MACROTILE_MODE6:
+               case mmGB_MACROTILE_MODE7:
+               case mmGB_MACROTILE_MODE8:
+               case mmGB_MACROTILE_MODE9:
+               case mmGB_MACROTILE_MODE10:
+               case mmGB_MACROTILE_MODE11:
+               case mmGB_MACROTILE_MODE12:
+               case mmGB_MACROTILE_MODE13:
+               case mmGB_MACROTILE_MODE14:
+               case mmGB_MACROTILE_MODE15:
+                       idx = (reg_offset - mmGB_MACROTILE_MODE0);
+                       return adev->gfx.config.macrotile_mode_array[idx];
+               default:
+                       return RREG32(reg_offset);
+               }
+       }
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+               bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
                if (reg_offset != cik_allowed_read_registers[i].reg_offset)
                        continue;
 
-               *value = cik_allowed_read_registers[i].grbm_indexed ?
-                        cik_read_indexed_register(adev, se_num,
-                                                  sh_num, reg_offset) :
-                        RREG32(reg_offset);
+               *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+                                               reg_offset);
                return 0;
        }
        return -EINVAL;
index 5c8a7a48a4adb16834ab5893e2341c03a5899d7d..419ba0ce7ee5b0cc343e14665ab61d91f3801d93 100644 (file)
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
                                                        adev->gfx.config.backend_enable_mask,
                                                        num_rb_pipes);
        }
+
+       /* cache the values for userspace */
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+                       adev->gfx.config.rb_config[i][j].rb_backend_disable =
+                               RREG32(mmCC_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+                               RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].raster_config =
+                               RREG32(mmPA_SC_RASTER_CONFIG);
+                       adev->gfx.config.rb_config[i][j].raster_config_1 =
+                               RREG32(mmPA_SC_RASTER_CONFIG_1);
+               }
+       }
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
index 1eb4d79d6e306f7137daa4e57e58078993f608d3..0450ac5ba6b6d1db4e3d19b8b1b027008ed112e9 100644 (file)
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
index 6c5a9cab55ded2448f99ae9c2ec1c59bbd68a3e7..f744caeaee049587520c75524f3ee6e580b8a63e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR      "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
        kfd_process_destroy_wq();
        kfd_topology_shutdown();
        kfd_chardev_exit();
-       dev_info(kfd_device, "Removed module\n");
+       pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
index 4859d263fa2a3ce51a816b2f3b36b98f298cf9f7..4728fad3fd7425ca2e0ef2fbb805dc145d078df7 100644 (file)
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        struct cik_sdma_rlc_registers *m;
 
        m = get_sdma_mqd(mqd);
-       m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-                       SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+       m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+                       << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                        q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                        1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                        6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
index 2bec902fc93906c7bbbe562e4d906048f0efa336..a3f1e62c60ba9d80b7d0244be3b3e3301a64557c 100644 (file)
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
        switch (type) {
        case KFD_QUEUE_TYPE_SDMA:
+               if (dev->dqm->queue_count >=
+                       CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+                       pr_err("Over-subscription is not allowed for SDMA.\n");
+                       retval = -EPERM;
+                       goto err_create_queue;
+               }
+
+               retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+               if (retval != 0)
+                       goto err_create_queue;
+               pqn->q = q;
+               pqn->kq = NULL;
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+                                               &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
+               print_queue(q);
+               break;
+
        case KFD_QUEUE_TYPE_COMPUTE:
                /* check if there is over subscription */
                if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
index 889ed24084e866bb846d9b6cc1f52a38e51cb5eb..f71fe6d2ddda795fd2fb914740b75845893c1298 100644 (file)
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);
-               if (aconnector->dc_link->type == dc_connection_mst_branch) {
+               if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                   aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                        aconnector, aconnector->base.base.id);
 
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
                mutex_lock(&aconnector->hpd_lock);
                dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+               if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+                       aconnector->fake_enable = false;
+
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-       drm_atomic_state_put(adev->dm.cached_state);
        adev->dm.cached_state = NULL;
 
        amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
-       struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+       struct edid *edid;
 
        if (!aconnector->base.edid_blob_ptr ||
                !aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                return;
        }
 
+       edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
        aconnector->edid = edid;
 
        aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+               if (!dm_new_crtc_state->stream)
+                       continue;
+
                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);
 
-               if (!dm_new_crtc_state->stream)
-                       continue;
-
                /*TODO How it works with MPO ?*/
                if (!dc_commit_planes_to_stream(
                                dm->dc,
@@ -4253,7 +4259,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+               drm_atomic_helper_wait_for_flip_done(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4338,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
                return;
 
        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!disconnected_acrtc)
+               return;
 
-       if (!disconnected_acrtc || !acrtc_state->stream)
+       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!acrtc_state->stream)
                return;
 
        /*
@@ -4455,7 +4463,7 @@ static int dm_update_crtcs_state(struct dc *dc,
                        }
                }
 
-               if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+               if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                                dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
                        new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
        } else {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+                                       !new_crtc_state->color_mgmt_changed)
                                continue;
 
                        if (!new_crtc_state->enable)
index 785b943b60ed11b12ccbeeca27c9cc30e3219c81..6e43168fbdd65b1d12be9d647eb2763200c89d27 100644 (file)
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
                if (signal == signal_type_info_tbl[i].type)
                        break;
 
+       if (i == NUM_ELEMENTS(signal_type_info_tbl))
+               goto fail;
+
        dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
                        signal_type_info_tbl[i].name,
                        link->link_index);
@@ -96,6 +99,8 @@ void dc_conn_log(struct dc_context *ctx,
 
        dm_logger_append(&entry, "^\n");
        dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
        dm_logger_close(&entry);
 
        va_end(args);
index aaaebd06d7ee33bb1f5f3bdb91eff71e752d88e9..86e6438c5cf35a6c0f981524454d58af417a183d 100644 (file)
@@ -249,7 +249,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
        struct graphics_object_id *dest_object_id)
 {
        uint32_t number;
-       uint16_t *id;
+       uint16_t *id = NULL;
        ATOM_OBJECT *object;
        struct bios_parser *bp = BP_FROM_DCB(dcb);
 
@@ -260,7 +260,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
 
        number = get_dest_obj_list(bp, object, &id);
 
-       if (number <= index)
+       if (number <= index || !id)
                return BP_RESULT_BADINPUT;
 
        *dest_object_id = object_id_from_bios_object_id(id[index]);
index fe63f5894d43bf93daf320e5486914bde724a80c..7240db2e6f095ebde8d2b685c0dd17b281c8e6d3 100644 (file)
@@ -121,6 +121,10 @@ static bool create_links(
                        goto failed_alloc;
                }
 
+               link->link_index = dc->link_count;
+               dc->links[dc->link_count] = link;
+               dc->link_count++;
+
                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@ static bool create_links(
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
 
+               if (!link->link_enc) {
+                       BREAK_TO_DEBUGGER();
+                       goto failed_alloc;
+               }
+
+               link->link_status.dpcd_caps = &link->dpcd_caps;
+
                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@ static bool create_links(
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
-
-               link->link_index = dc->link_count;
-               dc->links[dc->link_count] = link;
-               dc->link_count++;
        }
 
        return true;
index 0602610489d759d55263112505ce177ccd728f41..e27ed4a45265290690604b10e6d4df4fbee77514 100644 (file)
@@ -480,22 +480,6 @@ static void detect_dp(
                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                detect_dp_sink_caps(link);
 
-               /* DP active dongles */
-               if (is_dp_active_dongle(link)) {
-                       link->type = dc_connection_active_dongle;
-                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
-                               /*
-                                * active dongle unplug processing for short irq
-                                */
-                               link_disconnect_sink(link);
-                               return;
-                       }
-
-                       if (link->dpcd_caps.dongle_type !=
-                       DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-                               *converter_disable_audio = true;
-                       }
-               }
                if (is_mst_supported(link)) {
                        sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
                        link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@ static void detect_dp(
                                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                        }
                }
+
+               if (link->type != dc_connection_mst_branch &&
+                       is_dp_active_dongle(link)) {
+                       /* DP active dongles */
+                       link->type = dc_connection_active_dongle;
+                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+                               /*
+                                * active dongle unplug processing for short irq
+                                */
+                               link_disconnect_sink(link);
+                               return;
+                       }
+
+                       if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+                               *converter_disable_audio = true;
+               }
        } else {
                /* DP passive dongles */
                sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,75 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
                link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
+bool dp_active_dongle_validate_timing(
+               const struct dc_crtc_timing *timing,
+               const struct dc_dongle_caps *dongle_caps)
+{
+       unsigned int required_pix_clk = timing->pix_clk_khz;
+
+       if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+               dongle_caps->extendedCapValid == false)
+               return true;
+
+       /* Check Pixel Encoding */
+       switch (timing->pixel_encoding) {
+       case PIXEL_ENCODING_RGB:
+       case PIXEL_ENCODING_YCBCR444:
+               break;
+       case PIXEL_ENCODING_YCBCR422:
+               if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+                       return false;
+               break;
+       case PIXEL_ENCODING_YCBCR420:
+               if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+                       return false;
+               break;
+       default:
+               /* Invalid Pixel Encoding*/
+               return false;
+       }
+
+
+       /* Check Color Depth and Pixel Clock */
+       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               required_pix_clk /= 2;
+
+       switch (timing->display_color_depth) {
+       case COLOR_DEPTH_666:
+       case COLOR_DEPTH_888:
+               /*888 and 666 should always be supported*/
+               break;
+       case COLOR_DEPTH_101010:
+               if (dongle_caps->dp_hdmi_max_bpc < 10)
+                       return false;
+               required_pix_clk = required_pix_clk * 10 / 8;
+               break;
+       case COLOR_DEPTH_121212:
+               if (dongle_caps->dp_hdmi_max_bpc < 12)
+                       return false;
+               required_pix_clk = required_pix_clk * 12 / 8;
+               break;
+
+       case COLOR_DEPTH_141414:
+       case COLOR_DEPTH_161616:
+       default:
+               /* These color depths are currently not supported */
+               return false;
+       }
+
+       if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+               return false;
+
+       return true;
+}
+
 enum dc_status dc_link_validate_mode_timing(
                const struct dc_stream_state *stream,
                struct dc_link *link,
                const struct dc_crtc_timing *timing)
 {
        uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+       struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
 
        /* A hack to avoid failing any modes for EDID override feature on
         * topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1877,13 @@ enum dc_status dc_link_validate_mode_timing(
        if (link->remote_sinks[0])
                return DC_OK;
 
+       /* Passive Dongle */
        if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
-               return DC_EXCEED_DONGLE_MAX_CLK;
+               return DC_EXCEED_DONGLE_CAP;
+
+       /* Active Dongle*/
+       if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+               return DC_EXCEED_DONGLE_CAP;
 
        switch (stream->signal) {
        case SIGNAL_TYPE_EDP:
index ced42484dcfc7a45e51aec5ff1c69e823fe78ae9..e6bf05d76a942d0f764c224e9480acc5f07b7950 100644 (file)
@@ -1512,7 +1512,7 @@ static bool hpd_rx_irq_check_link_loss_status(
        struct dc_link *link,
        union hpd_irq_data *hpd_irq_dpcd_data)
 {
-       uint8_t irq_reg_rx_power_state;
+       uint8_t irq_reg_rx_power_state = 0;
        enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
        union lane_status lane_status;
        uint32_t lane;
@@ -1524,60 +1524,55 @@ static bool hpd_rx_irq_check_link_loss_status(
 
        if (link->cur_link_settings.lane_count == 0)
                return return_code;
-       /*1. Check that we can handle interrupt: Not in FS DOS,
-        *  Not in "Display Timeout" state, Link is trained.
-        */
 
-       dpcd_result = core_link_read_dpcd(link,
-               DP_SET_POWER,
-               &irq_reg_rx_power_state,
-               sizeof(irq_reg_rx_power_state));
+       /*1. Check that Link Status changed, before re-training.*/
 
-       if (dpcd_result != DC_OK) {
-               irq_reg_rx_power_state = DP_SET_POWER_D0;
-               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                       "%s: DPCD read failed to obtain power state.\n",
-                       __func__);
+       /*parse lane status*/
+       for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+               /* check status of lanes 0,1
+                * changed DpcdAddress_Lane01Status (0x202)
+                */
+               lane_status.raw = get_nibble_at_index(
+                       &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+                       lane);
+
+               if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+                       !lane_status.bits.CR_DONE_0 ||
+                       !lane_status.bits.SYMBOL_LOCKED_0) {
+                       /* if one of the channel equalization, clock
+                        * recovery or symbol lock is dropped
+                        * consider it as (link has been
+                        * dropped) dp sink status has changed
+                        */
+                       sink_status_changed = true;
+                       break;
+               }
        }
 
-       if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
-               /*2. Check that Link Status changed, before re-training.*/
-
-               /*parse lane status*/
-               for (lane = 0;
-                       lane < link->cur_link_settings.lane_count;
-                       lane++) {
+       /* Check interlane align.*/
+       if (sink_status_changed ||
+               !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
 
-                       /* check status of lanes 0,1
-                        * changed DpcdAddress_Lane01Status (0x202)*/
-                       lane_status.raw = get_nibble_at_index(
-                               &hpd_irq_dpcd_data->bytes.lane01_status.raw,
-                               lane);
-
-                       if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-                               !lane_status.bits.CR_DONE_0 ||
-                               !lane_status.bits.SYMBOL_LOCKED_0) {
-                               /* if one of the channel equalization, clock
-                                * recovery or symbol lock is dropped
-                                * consider it as (link has been
-                                * dropped) dp sink status has changed*/
-                               sink_status_changed = true;
-                               break;
-                       }
+               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+                       "%s: Link Status changed.\n", __func__);
 
-               }
+               return_code = true;
 
-               /* Check interlane align.*/
-               if (sink_status_changed ||
-                       !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
-                       INTERLANE_ALIGN_DONE) {
+               /*2. Check that we can handle interrupt: Not in FS DOS,
+                *  Not in "Display Timeout" state, Link is trained.
+                */
+               dpcd_result = core_link_read_dpcd(link,
+                       DP_SET_POWER,
+                       &irq_reg_rx_power_state,
+                       sizeof(irq_reg_rx_power_state));
 
+               if (dpcd_result != DC_OK) {
                        dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                               "%s: Link Status changed.\n",
+                               "%s: DPCD read failed to obtain power state.\n",
                                __func__);
-
-                       return_code = true;
+               } else {
+                       if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+                               return_code = false;
                }
        }
 
@@ -2062,6 +2057,24 @@ bool is_dp_active_dongle(const struct dc_link *link)
                        (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
 }
 
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+       switch (bpc) {
+       case DOWN_STREAM_MAX_8BPC:
+               return 8;
+       case DOWN_STREAM_MAX_10BPC:
+               return 10;
+       case DOWN_STREAM_MAX_12BPC:
+               return 12;
+       case DOWN_STREAM_MAX_16BPC:
+               return 16;
+       default:
+               break;
+       }
+
+       return -1;
+}
+
 static void get_active_converter_info(
        uint8_t data, struct dc_link *link)
 {
@@ -2131,7 +2144,8 @@ static void get_active_converter_info(
                                        hdmi_caps.bits.YCrCr420_CONVERSION;
 
                                link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
-                                       hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+                                       translate_dpcd_max_bpc(
+                                               hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
                                link->dpcd_caps.dongle_caps.extendedCapValid = true;
                        }
index d1cdf9f8853d7754df856740439740e1493ae111..b7422d3b71efb64a18087213b2e0c08a28e00adf 100644 (file)
@@ -516,13 +516,11 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
                        right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
 
                if (right_view) {
-                       data->viewport.width /= 2;
-                       data->viewport_c.width /= 2;
-                       data->viewport.x +=  data->viewport.width;
-                       data->viewport_c.x +=  data->viewport_c.width;
+                       data->viewport.x +=  data->viewport.width / 2;
+                       data->viewport_c.x +=  data->viewport_c.width / 2;
                        /* Ceil offset pipe */
-                       data->viewport.width += data->viewport.width % 2;
-                       data->viewport_c.width += data->viewport_c.width % 2;
+                       data->viewport.width = (data->viewport.width + 1) / 2;
+                       data->viewport_c.width = (data->viewport_c.width + 1) / 2;
                } else {
                        data->viewport.width /= 2;
                        data->viewport_c.width /= 2;
@@ -580,14 +578,12 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
        if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
                pipe_ctx->plane_state) {
                if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-                       pipe_ctx->plane_res.scl_data.recout.height /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
                        /* Floor primary pipe, ceil 2ndary pipe */
-                       pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+                       pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
                } else {
-                       pipe_ctx->plane_res.scl_data.recout.width /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
-                       pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+                       pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
                }
        } else if (pipe_ctx->bottom_pipe &&
                        pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
@@ -856,6 +852,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
        pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
 
+
        /* Taps calculations */
        if (pipe_ctx->plane_res.xfm != NULL)
                res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
@@ -864,16 +861,21 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->plane_res.dpp != NULL)
                res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
                                pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
        if (!res) {
                /* Try 24 bpp linebuffer */
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
 
-               res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.xfm != NULL)
+                       res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.xfm,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
 
-               res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.dpp != NULL)
+                       res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.dpp,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
        }
 
        if (res)
@@ -991,8 +993,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
 
        head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!head_pipe->plane_state)
                return head_pipe;
@@ -1447,11 +1451,16 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
 
 static struct audio *find_first_free_audio(
                struct resource_context *res_ctx,
-               const struct resource_pool *pool)
+               const struct resource_pool *pool,
+               enum engine_id id)
 {
        int i;
        for (i = 0; i < pool->audio_count; i++) {
                if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+                       /*we have enough audio endpoint, find the matching inst*/
+                       if (id != i)
+                               continue;
+
                        return pool->audios[i];
                }
        }
@@ -1700,7 +1709,7 @@ enum dc_status resource_map_pool_resources(
            dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
            stream->audio_info.mode_count) {
                pipe_ctx->stream_res.audio = find_first_free_audio(
-               &context->res_ctx, pool);
+               &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
                /*
                 * Audio assigned in order first come first get.
@@ -1765,13 +1774,16 @@ enum dc_status dc_validate_global_state(
        enum dc_status result = DC_ERROR_UNEXPECTED;
        int i, j;
 
+       if (!new_ctx)
+               return DC_ERROR_UNEXPECTED;
+
        if (dc->res_pool->funcs->validate_global) {
                        result = dc->res_pool->funcs->validate_global(dc, new_ctx);
                        if (result != DC_OK)
                                return result;
        }
 
-       for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+       for (i = 0; i < new_ctx->stream_count; i++) {
                struct dc_stream_state *stream = new_ctx->streams[i];
 
                for (j = 0; j < dc->res_pool->pipe_count; j++) {
index b00a6040a69746e24b1291ae9250e225b0290e68..e230cc44a0a7d31f5889d5cb0f4f0688612ace4b 100644 (file)
@@ -263,7 +263,6 @@ bool dc_stream_set_cursor_position(
                struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
                struct mem_input *mi = pipe_ctx->plane_res.mi;
                struct hubp *hubp = pipe_ctx->plane_res.hubp;
-               struct transform *xfm = pipe_ctx->plane_res.xfm;
                struct dpp *dpp = pipe_ctx->plane_res.dpp;
                struct dc_cursor_position pos_cpy = *position;
                struct dc_cursor_mi_param param = {
@@ -294,11 +293,11 @@ bool dc_stream_set_cursor_position(
                if (mi != NULL && mi->funcs->set_cursor_position != NULL)
                        mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 
-               if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
-                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+               if (!hubp)
+                       continue;
 
-               if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
-                       xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+               if (hubp->funcs->set_cursor_position != NULL)
+                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
 
                if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
                        dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
index 81c40f8864db2086324da7d21abccacb53289744..0df9ecb2710c2ead2e23833a26b4aa03b53706ef 100644 (file)
@@ -352,11 +352,11 @@ void dce_aud_az_enable(struct audio *audio)
        uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 
        set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       CLOCK_GATING_DISABLE);
-               set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       AUDIO_ENABLED);
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           CLOCK_GATING_DISABLE);
+       set_reg_field_value(value, 1,
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           AUDIO_ENABLED);
 
        AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
        value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
index 4fd49a16c3b6ef8d225d16eec01a89bb3a1f9954..e42b6eb1c1f0e133d4ae4c33d6fe804033db275e 100644 (file)
@@ -87,6 +87,9 @@ static void dce110_update_generic_info_packet(
         */
        uint32_t max_retries = 50;
 
+       /*we need turn on clock before programming AFMT block*/
+       REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
        if (REG(AFMT_VBI_PACKET_CONTROL1)) {
                if (packet_index >= 8)
                        ASSERT(0);
index 1229a3315018e4c4fb6af64134b4deb211716360..07ff8d2faf3f4630276d9241092f605274375cda 100644 (file)
@@ -991,6 +991,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
        struct dc_link *link = stream->sink->link;
        struct dc *dc = pipe_ctx->stream->ctx->dc;
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+                       pipe_ctx->stream_res.stream_enc, true);
        if (pipe_ctx->stream_res.audio) {
                pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
 
@@ -1015,18 +1025,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
                 */
        }
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, true);
-
-
        /* blank at encoder level */
        if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
                if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
@@ -1774,6 +1772,10 @@ static enum dc_status validate_fbc(struct dc *dc,
        if (pipe_ctx->stream->sink->link->psr_enabled)
                return DC_ERROR_UNEXPECTED;
 
+       /* Nothing to compress */
+       if (!pipe_ctx->plane_state)
+               return DC_ERROR_UNEXPECTED;
+
        /* Only for non-linear tiling */
        if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
                return DC_ERROR_UNEXPECTED;
@@ -1868,8 +1870,10 @@ static void dce110_reset_hw_ctx_wrap(
                                pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
                        struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
-                       /* disable already, no need to disable again */
-                       if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+                       /* Disable if new stream is null. O/w, if stream is
+                        * disabled already, no need to disable again.
+                        */
+                       if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
                                core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
 
                        pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
index db96d2b47ff1627ab2240c213ff01a69d83ea4fb..61adb8174ce09f8be177a4a2e300cdcb2327cf1f 100644 (file)
@@ -1037,11 +1037,13 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
        struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
                                                 GFP_KERNEL);
 
-       if ((dce110_tgv == NULL) ||
-               (dce110_xfmv == NULL) ||
-               (dce110_miv == NULL) ||
-               (dce110_oppv == NULL))
-                       return false;
+       if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+               kfree(dce110_tgv);
+               kfree(dce110_xfmv);
+               kfree(dce110_miv);
+               kfree(dce110_oppv);
+               return false;
+       }
 
        dce110_opp_v_construct(dce110_oppv, ctx);
 
index 67ac737eaa7e9de169d5aca845c3384dacfc16fb..4befce6cd87a28ea5a330697b8e7c657dbc37cf5 100644 (file)
@@ -1112,10 +1112,7 @@ bool dce110_timing_generator_validate_timing(
        enum signal_type signal)
 {
        uint32_t h_blank;
-       uint32_t h_back_porch;
-       uint32_t hsync_offset = timing->h_border_right +
-                       timing->h_front_porch;
-       uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+       uint32_t h_back_porch, hsync_offset, h_sync_start;
 
        struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 
@@ -1124,6 +1121,9 @@ bool dce110_timing_generator_validate_timing(
        if (!timing)
                return false;
 
+       hsync_offset = timing->h_border_right + timing->h_front_porch;
+       h_sync_start = timing->h_addressable + hsync_offset;
+
        /* Currently we don't support 3D, so block all 3D timings */
        if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
                return false;
index 4c4bd72d4e405ad24e61760828fc3d7c7e439dd3..9fc8f827f2a187f0f72dbc888a1ca25d85986205 100644 (file)
@@ -912,11 +912,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
        struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
        struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!idle_pipe)
-               return false;
+               return NULL;
 
        idle_pipe->stream = head_pipe->stream;
        idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
index c7333cdf18021750b9cd3805d2f1bcb1008eea69..fced178c8c794493bb3edacce369c1cd81ee00d4 100644 (file)
@@ -496,9 +496,6 @@ static bool tgn10_validate_timing(
                timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
                return false;
 
-       if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
-               tg->ctx->dc->debug.disable_stereo_support)
-               return false;
        /* Temporarily blocking interlacing mode until it's supported */
        if (timing->flags.INTERLACE == 1)
                return false;
index 01df85641684fe2d92ee149fc0b4642a01708ddd..94fc31080fdad1faa14fd4de8c13db8e6ccc2bcc 100644 (file)
@@ -38,7 +38,7 @@ enum dc_status {
        DC_FAIL_DETACH_SURFACES = 8,
        DC_FAIL_SURFACE_VALIDATE = 9,
        DC_NO_DP_LINK_BANDWIDTH = 10,
-       DC_EXCEED_DONGLE_MAX_CLK = 11,
+       DC_EXCEED_DONGLE_CAP = 11,
        DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
        DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
        DC_FAIL_SCALING = 14,
index 7c08bc62c1f53545d362edb3fb102efb12d25fd0..ea88997e1bbd88a65823ce0091994bbc92638bdd 100644 (file)
@@ -259,13 +259,6 @@ struct transform_funcs {
                        struct transform *xfm_base,
                        const struct dc_cursor_attributes *attr);
 
-       void (*set_cursor_position)(
-                       struct transform *xfm_base,
-                       const struct dc_cursor_position *pos,
-                       const struct dc_cursor_mi_param *param,
-                       uint32_t width
-                       );
-
 };
 
 const uint16_t *get_filter_2tap_16p(void);
index 72b22b805412b2cb19d8d90e85904be737ecf335..5a5427bbd70e47e8e8e1e0b744bf356a6175a6bc 100644 (file)
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
                                       formats, ARRAY_SIZE(formats),
                                       NULL,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
-       if (ret) {
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
        hdlcd->plane = plane;
index 764d0c83710ca563554672d06332fc10883b5de7..0afb53b1f4e92b516088b03e287b09936818dd43 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/console.h>
 #include <linux/list.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@ err_unload:
 err_free:
        drm_mode_config_cleanup(drm);
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
        pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
        drm_mode_config_cleanup(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 }
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
                return 0;
 
        drm_kms_helper_poll_disable(drm);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
 
        hdlcd->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(hdlcd->state)) {
+               drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(hdlcd->state);
        }
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
                return 0;
 
        drm_atomic_helper_resume(drm, hdlcd->state);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
        drm_kms_helper_poll_enable(drm);
-       pm_runtime_set_active(dev);
 
        return 0;
 }
index 3615d18a7ddf3a5cc49f7d09d68eec35fccc6e3c..904fff80917baa09bfa5cdafa2f56ee2aa4b4474 100644 (file)
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
        /* We rely on firmware to set mclk to a sensible level. */
        clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
-       hwdev->modeset(hwdev, &vm);
-       hwdev->leave_config_mode(hwdev);
+       hwdev->hw->modeset(hwdev, &vm);
+       hwdev->hw->leave_config_mode(hwdev);
        drm_crtc_vblank_on(crtc);
 }
 
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        int err;
 
+       /* always disable planes on the CRTC that is being turned off */
+       drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
        drm_crtc_vblank_off(crtc);
-       hwdev->enter_config_mode(hwdev);
+       hwdev->hw->enter_config_mode(hwdev);
+
        clk_disable_unprepare(hwdev->pxlclk);
 
        err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
 
 mclk_calc:
        drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
-       ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+       ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
        if (ret < 0)
                return -EINVAL;
        return 0;
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.vsync_irq);
+                            hwdev->hw->map.de_irq_map.vsync_irq);
        return 0;
 }
 
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.vsync_irq);
+                             hwdev->hw->map.de_irq_map.vsync_irq);
 }
 
 static const struct drm_crtc_funcs malidp_crtc_funcs = {
index b8944666a18f0e72ea4715ff8742c5675b90d8a4..91f2b0191368c942ddb936ad6e736e560342fe99 100644 (file)
@@ -47,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
         * directly.
         */
        malidp_hw_write(hwdev, gamma_write_mask,
-                       hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+                       hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
        for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
                malidp_hw_write(hwdev, data[i],
-                               hwdev->map.coeffs_base +
+                               hwdev->hw->map.coeffs_base +
                                MALIDP_COEF_TABLE_DATA);
 }
 
@@ -103,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
                        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
                                malidp_hw_write(hwdev,
                                                mc->coloradj_coeffs[i],
-                                               hwdev->map.coeffs_base +
+                                               hwdev->hw->map.coeffs_base +
                                                MALIDP_COLOR_ADJ_COEF + 4 * i);
 
                malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -120,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        struct malidp_se_config *s = &cs->scaler_config;
        struct malidp_se_config *old_s = &old_cs->scaler_config;
-       u32 se_control = hwdev->map.se_base +
-                        ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 se_control = hwdev->hw->map.se_base +
+                        ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC);
        u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
        u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -135,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
                return;
        }
 
-       hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+       hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
        val = malidp_hw_read(hwdev, se_control);
        val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
 
@@ -170,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
        int ret;
 
        atomic_set(&malidp->config_valid, 0);
-       hwdev->set_config_valid(hwdev);
+       hwdev->hw->set_config_valid(hwdev);
        /* don't wait for config_valid flag if we are in config mode */
-       if (hwdev->in_config_mode(hwdev))
+       if (hwdev->hw->in_config_mode(hwdev))
                return 0;
 
        ret = wait_event_interruptible_timeout(malidp->wq,
@@ -455,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        /* we can only suspend if the hardware is in config mode */
-       WARN_ON(!hwdev->in_config_mode(hwdev));
+       WARN_ON(!hwdev->hw->in_config_mode(hwdev));
 
        hwdev->pm_suspended = true;
        clk_disable_unprepare(hwdev->mclk);
@@ -500,11 +500,7 @@ static int malidp_bind(struct device *dev)
        if (!hwdev)
                return -ENOMEM;
 
-       /*
-        * copy the associated data from malidp_drm_of_match to avoid
-        * having to keep a reference to the OF node after binding
-        */
-       memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+       hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
        malidp->dev = hwdev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -568,13 +564,13 @@ static int malidp_bind(struct device *dev)
                goto query_hw_fail;
        }
 
-       ret = hwdev->query_hw(hwdev);
+       ret = hwdev->hw->query_hw(hwdev);
        if (ret) {
                DRM_ERROR("Invalid HW configuration\n");
                goto query_hw_fail;
        }
 
-       version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+       version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
        DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
                 (version >> 12) & 0xf, (version >> 8) & 0xf);
 
@@ -589,7 +585,7 @@ static int malidp_bind(struct device *dev)
 
        for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
                out_depth = (out_depth << 8) | (output_width[i] & 0xf);
-       malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+       malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
 
        atomic_set(&malidp->config_valid, 0);
        init_waitqueue_head(&malidp->wq);
@@ -671,7 +667,7 @@ query_hw_fail:
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 alloc_fail:
        of_reserved_mem_device_release(dev);
 
@@ -704,7 +700,7 @@ static void malidp_unbind(struct device *dev)
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 }
 
index 17bca99e8ac825334c982122fec82f22ebac636c..2bfb542135ac5ab3201a9934fe32413d6984e354 100644 (file)
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                        break;
                /*
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                return true;
 
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                        break;
                /*
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                return true;
 
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
        return 0;
 }
 
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
        [MALIDP_500] = {
                .map = {
                        .coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir
 {
        u32 base = malidp_get_block_base(hwdev, block);
 
-       if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+       if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
        else
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev;
+       struct malidp_hw *hw;
        const struct malidp_irq_map *de;
        u32 status, mask, dc_status;
        irqreturn_t ret = IRQ_NONE;
 
        hwdev = malidp->dev;
-       de = &hwdev->map.de_irq_map;
+       hw = hwdev->hw;
+       de = &hw->map.de_irq_map;
 
        /*
         * if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
                return IRQ_NONE;
 
        /* first handle the config valid IRQ */
-       dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
-       if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+       dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+       if (dc_status & hw->map.dc_irq_map.vsync_irq) {
                /* we have a page flip event */
                atomic_set(&malidp->config_valid, 1);
                malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
 
        /* first enable the DC block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
-                            hwdev->map.dc_irq_map.irq_mask);
+                            hwdev->hw->map.dc_irq_map.irq_mask);
 
        /* now enable the DE block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.irq_mask);
+                            hwdev->hw->map.de_irq_map.irq_mask);
 
        return 0;
 }
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.irq_mask);
+                             hwdev->hw->map.de_irq_map.irq_mask);
        malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
-                             hwdev->map.dc_irq_map.irq_mask);
+                             hwdev->hw->map.dc_irq_map.irq_mask);
 }
 
 static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev = malidp->dev;
+       struct malidp_hw *hw = hwdev->hw;
+       const struct malidp_irq_map *se = &hw->map.se_irq_map;
        u32 status, mask;
 
        /*
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        if (hwdev->pm_suspended)
                return IRQ_NONE;
 
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
-       if (!(status & hwdev->map.se_irq_map.irq_mask))
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+       if (!(status & se->irq_mask))
                return IRQ_NONE;
 
-       mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+       mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
        status &= mask;
        /* ToDo: status decoding and firing up of VSYNC and page flip events */
 
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
        }
 
        malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
-                            hwdev->map.se_irq_map.irq_mask);
+                            hwdev->hw->map.se_irq_map.irq_mask);
 
        return 0;
 }
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
-                             hwdev->map.se_irq_map.irq_mask);
+                             hwdev->hw->map.se_irq_map.irq_mask);
 }
index 849ad9a30c3af4b31e98526b954e2b0819275534..b0690ebb356523781bfaf5a6fbbb4a38d39cb5e2 100644 (file)
@@ -120,18 +120,14 @@ struct malidp_hw_regmap {
 /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
 #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
 
-struct malidp_hw_device {
-       const struct malidp_hw_regmap map;
-       void __iomem *regs;
+struct malidp_hw_device;
 
-       /* APB clock */
-       struct clk *pclk;
-       /* AXI clock */
-       struct clk *aclk;
-       /* main clock for display core */
-       struct clk *mclk;
-       /* pixel clock for display core */
-       struct clk *pxlclk;
+/*
+ * Static structure containing hardware specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+       const struct malidp_hw_regmap map;
 
        /*
         * Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@ struct malidp_hw_device {
                             struct videomode *vm);
 
        u8 features;
-
-       u8 min_line_size;
-       u16 max_line_size;
-
-       /* track the device PM state */
-       bool pm_suspended;
-
-       /* size of memory used for rotating layers, up to two banks available */
-       u32 rotation_memory[2];
 };
 
 /* Supported variants of the hardware */
@@ -202,7 +189,33 @@ enum {
        MALIDP_MAX_DEVICES
 };
 
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+       struct malidp_hw *hw;
+       void __iomem *regs;
+
+       /* APB clock */
+       struct clk *pclk;
+       /* AXI clock */
+       struct clk *aclk;
+       /* main clock for display core */
+       struct clk *mclk;
+       /* pixel clock for display core */
+       struct clk *pxlclk;
+
+       u8 min_line_size;
+       u16 max_line_size;
+
+       /* track the device PM state */
+       bool pm_suspended;
+
+       /* size of memory used for rotating layers, up to two banks available */
+       u32 rotation_memory[2];
+};
 
 static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
 {
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
 {
        switch (block) {
        case MALIDP_SE_BLOCK:
-               return hwdev->map.se_base;
+               return hwdev->hw->map.se_base;
        case MALIDP_DC_BLOCK:
-               return hwdev->map.dc_base;
+               return hwdev->hw->map.dc_base;
        }
 
        return 0;
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
 static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
                                         unsigned int pitch)
 {
-       return !(pitch & (hwdev->map.bus_align_bytes - 1));
+       return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
 }
 
 /* U16.16 */
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
        };
        u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
                  MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
-       u32 image_enh = hwdev->map.se_base +
-                       ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 image_enh = hwdev->hw->map.se_base +
+                       ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
        u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
        int i;
index 94e7e3fa3408cf163fda7f81b40e76ca73ac4c96..e7419797bbd16c157cab134184c24fd552cd2c44 100644 (file)
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
        struct malidp_plane *mp = to_malidp_plane(plane);
 
        if (mp->base.fb)
-               drm_framebuffer_unreference(mp->base.fb);
+               drm_framebuffer_put(mp->base.fb);
 
        drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
@@ -185,8 +185,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 
        fb = state->fb;
 
-       ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
-                                           fb->format->format);
+       ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+                                            mp->layer->id,
+                                            fb->format->format);
        if (ms->format == MALIDP_INVALID_FORMAT_ID)
                return -EINVAL;
 
@@ -211,7 +212,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
         * third plane stride register.
         */
        if (ms->n_planes == 3 &&
-           !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+           !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
            (state->fb->pitches[1] != state->fb->pitches[2]))
                return -EINVAL;
 
@@ -229,9 +230,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
-                                                state->crtc_w,
-                                                fb->format->format);
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+                                                    state->crtc_w,
+                                                    fb->format->format);
                if (val < 0)
                        return val;
 
@@ -251,7 +252,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
                return;
 
        if (num_planes == 3)
-               num_strides = (mp->hwdev->features &
+               num_strides = (mp->hwdev->hw->features &
                               MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
 
        for (i = 0; i < num_strides; ++i)
@@ -264,13 +265,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                                   struct drm_plane_state *old_state)
 {
        struct malidp_plane *mp;
-       const struct malidp_hw_regmap *map;
        struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
        u32 src_w, src_h, dest_w, dest_h, val;
        int i;
 
        mp = to_malidp_plane(plane);
-       map = &mp->hwdev->map;
 
        /* convert src values from Q16 fixed point to integer */
        src_w = plane->state->src_w >> 16;
@@ -363,7 +362,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
 int malidp_de_planes_init(struct drm_device *drm)
 {
        struct malidp_drm *malidp = drm->dev_private;
-       const struct malidp_hw_regmap *map = &malidp->dev->map;
+       const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
        struct malidp_plane *plane = NULL;
        enum drm_plane_type plane_type;
        unsigned long crtcs = 1 << drm->mode_config.num_crtc;
index b4efcbabf7f726f6e790400a15dc57127f6b341d..d034b2cb5eee30ee0b1b2fc44d1b010c01263787 100644 (file)
@@ -372,9 +372,18 @@ struct adv7511 {
 };
 
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
 void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_DRM_I2C_ADV7533
index b33d730e4d7366880574e345670bb8e9c3bb3e8b..a20a45c0b353f18eb9d2af13f2a5d707e7b1fc5c 100644 (file)
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
        return 0;
 }
 
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 {
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
        int ret = adv7511_cec_parse_dt(dev, adv7511);
 
        if (ret)
-               return ret;
+               goto err_cec_parse_dt;
 
        adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
                adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
-       if (IS_ERR(adv7511->cec_adap))
-               return PTR_ERR(adv7511->cec_adap);
+       if (IS_ERR(adv7511->cec_adap)) {
+               ret = PTR_ERR(adv7511->cec_adap);
+               goto err_cec_alloc;
+       }
 
        regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
        /* cec soft reset */
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
                     ((adv7511->cec_clk_freq / 750000) - 1) << 2);
 
        ret = cec_register_adapter(adv7511->cec_adap, dev);
-       if (ret) {
-               cec_delete_adapter(adv7511->cec_adap);
-               adv7511->cec_adap = NULL;
-       }
-       return ret;
+       if (ret)
+               goto err_cec_register;
+       return 0;
+
+err_cec_register:
+       cec_delete_adapter(adv7511->cec_adap);
+       adv7511->cec_adap = NULL;
+err_cec_alloc:
+       dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+                ret);
+err_cec_parse_dt:
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return ret == -EPROBE_DEFER ? ret : 0;
 }
index 0e14f1572d0593452d494a75dd712f7fb194d740..efa29db5fc2b7eeff375d2f6cdafce9340fd6301 100644 (file)
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        struct device *dev = &i2c->dev;
        unsigned int main_i2c_addr = i2c->addr << 1;
        unsigned int edid_i2c_addr = main_i2c_addr + 4;
-       unsigned int offset;
        unsigned int val;
        int ret;
 
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        if (adv7511->type == ADV7511)
                adv7511_set_link_config(adv7511, &link_config);
 
+       ret = adv7511_cec_init(dev, adv7511);
+       if (ret)
+               goto err_unregister_cec;
+
        adv7511->bridge.funcs = &adv7511_bridge_funcs;
        adv7511->bridge.of_node = dev->of_node;
 
        drm_bridge_add(&adv7511->bridge);
 
        adv7511_audio_init(dev, adv7511);
-
-       offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-       ret = adv7511_cec_init(dev, adv7511, offset);
-       if (ret)
-               goto err_unregister_cec;
-#else
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
-                    ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
        return 0;
 
 err_unregister_cec:
index 0903ba574f61c4d1ab40a0e18d77d852bd282c0a..75b0d3f6e4de919301b63af95b28d2b7af4bf79e 100644 (file)
 
 #include <linux/of_graph.h>
 
+struct lvds_encoder {
+       struct drm_bridge bridge;
+       struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+       struct lvds_encoder *lvds_encoder = container_of(bridge,
+                                                        struct lvds_encoder,
+                                                        bridge);
+
+       return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+                                bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+       .attach = lvds_encoder_attach,
+};
+
 static int lvds_encoder_probe(struct platform_device *pdev)
 {
        struct device_node *port;
        struct device_node *endpoint;
        struct device_node *panel_node;
        struct drm_panel *panel;
-       struct drm_bridge *bridge;
+       struct lvds_encoder *lvds_encoder;
+
+       lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+                                   GFP_KERNEL);
+       if (!lvds_encoder)
+               return -ENOMEM;
 
        /* Locate the panel DT node. */
        port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
-       if (IS_ERR(bridge))
-               return PTR_ERR(bridge);
+       lvds_encoder->panel_bridge =
+               devm_drm_panel_bridge_add(&pdev->dev,
+                                         panel, DRM_MODE_CONNECTOR_LVDS);
+       if (IS_ERR(lvds_encoder->panel_bridge))
+               return PTR_ERR(lvds_encoder->panel_bridge);
+
+       /* The panel_bridge bridge is attached to the panel's of_node,
+        * but we need a bridge attached to our of_node for our user
+        * to look up.
+        */
+       lvds_encoder->bridge.of_node = pdev->dev.of_node;
+       lvds_encoder->bridge.funcs = &funcs;
+       drm_bridge_add(&lvds_encoder->bridge);
 
-       platform_set_drvdata(pdev, bridge);
+       platform_set_drvdata(pdev, lvds_encoder);
 
        return 0;
 }
 
 static int lvds_encoder_remove(struct platform_device *pdev)
 {
-       struct drm_bridge *bridge = platform_get_drvdata(pdev);
+       struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
 
-       drm_bridge_remove(bridge);
+       drm_bridge_remove(&lvds_encoder->bridge);
 
        return 0;
 }
index bf14214fa4640279fa46b655333198ed5aa1446e..b72259bf6e2fb37ce155108a43f6e1ce49de91dc 100644 (file)
@@ -138,6 +138,7 @@ struct dw_hdmi {
        struct device *dev;
        struct clk *isfr_clk;
        struct clk *iahb_clk;
+       struct clk *cec_clk;
        struct dw_hdmi_i2c *i2c;
 
        struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
                goto err_isfr;
        }
 
+       hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+       if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+               hdmi->cec_clk = NULL;
+       } else if (IS_ERR(hdmi->cec_clk)) {
+               ret = PTR_ERR(hdmi->cec_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+                               ret);
+
+               hdmi->cec_clk = NULL;
+               goto err_iahb;
+       } else {
+               ret = clk_prepare_enable(hdmi->cec_clk);
+               if (ret) {
+                       dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+                               ret);
+                       goto err_iahb;
+               }
+       }
+
        /* Product and revision IDs */
        hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
                      | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@ err_iahb:
                cec_notifier_put(hdmi->cec_notifier);
 
        clk_disable_unprepare(hdmi->iahb_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 err_isfr:
        clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
 
        clk_disable_unprepare(hdmi->iahb_clk);
        clk_disable_unprepare(hdmi->isfr_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 
        if (hdmi->i2c)
                i2c_del_adapter(&hdmi->i2c->adap);
index 8571cfd877c520b2e09530f1070a58dae8a4baf9..8636e7eeb7315c471eae9a01dad16d20133a441b 100644 (file)
@@ -97,7 +97,7 @@
 #define DP0_ACTIVEVAL          0x0650
 #define DP0_SYNCVAL            0x0654
 #define DP0_MISC               0x0658
-#define TU_SIZE_RECOMMENDED            (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED            (63) /* LSCLK cycles per TU */
 #define BPC_6                          (0 << 5)
 #define BPC_8                          (1 << 5)
 
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
                                tmp = (tmp << 8) | buf[i];
                        i++;
                        if (((i % 4) == 0) || (i == size)) {
-                               tc_write(DP0_AUXWDATA(i >> 2), tmp);
+                               tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
                                tmp = 0;
                        }
                }
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
        ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
        if (ret < 0)
                goto err_dpcd_read;
-       if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
-               goto err_dpcd_inval;
+       if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+               dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
+               tc->link.base.rate = 270000;
+       }
+
+       if (tc->link.base.num_lanes > 2) {
+               dev_dbg(tc->dev, "Falling to 2 lanes\n");
+               tc->link.base.num_lanes = 2;
+       }
 
        ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
        if (ret < 0)
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
 err_dpcd_read:
        dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
        return ret;
-err_dpcd_inval:
-       dev_err(tc->dev, "invalid DPCD\n");
-       return -EINVAL;
 }
 
 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
 
+       /*
+        * Recommended maximum number of symbols transferred in a transfer unit:
+        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+        *              (output active video bandwidth in bytes))
+        * Must be less than tu_size.
+        */
+       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
        dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
 
 
-       /* LCD Ctl Frame Size */
-       tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+       /*
+        * LCD Ctl Frame Size
+        * datasheet is not clear of vsdelay in case of DPI
+        * assume we do not need any delay when DPI is a source of
+        * sync signals
+        */
+       tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
                 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
-       tc_write(HTIM01, (left_margin << 16) |          /* H back porch */
-                        (hsync_len << 0));             /* Hsync */
-       tc_write(HTIM02, (right_margin << 16) |         /* H front porch */
-                        (mode->hdisplay << 0));        /* width */
+       tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+                        (ALIGN(hsync_len, 2) << 0));    /* Hsync */
+       tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) |  /* H front porch */
+                        (ALIGN(mode->hdisplay, 2) << 0)); /* width */
        tc_write(VTIM01, (upper_margin << 16) |         /* V back porch */
                         (vsync_len << 0));             /* Vsync */
        tc_write(VTIM02, (lower_margin << 16) |         /* V front porch */
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        /* DP Main Stream Attributes */
        vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
        tc_write(DP0_VIDSYNCDELAY,
-                (0x003e << 16) |       /* thresh_dly */
+                (max_tu_symbol << 16) |        /* thresh_dly */
                 (vid_sync_dly << 0));
 
        tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
                 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
 
-       /*
-        * Recommended maximum number of symbols transferred in a transfer unit:
-        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
-        *              (output active video bandwidth in bytes))
-        * Must be less than tu_size.
-        */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
-       tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+       tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+                          BPC_8);
 
        return 0;
 err:
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
        unsigned int rate;
        u32 dp_phy_ctrl;
        int timeout;
-       bool aligned;
-       bool ready;
        u32 value;
        int ret;
        u8 tmp[8];
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
                ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
                if (ret < 0)
                        goto err_dpcd_read;
-               ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
-                                    DP_CHANNEL_EQ_BITS));      /* Lane0 */
-               aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
-       } while ((--timeout) && !(ready && aligned));
+       } while ((--timeout) &&
+                !(drm_dp_channel_eq_ok(tmp + 2,  tc->link.base.num_lanes)));
 
        if (timeout == 0) {
                /* Read DPCD 0x200-0x201 */
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
                if (ret < 0)
                        goto err_dpcd_read;
+               dev_err(dev, "channel(s) EQ not ok\n");
                dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
                dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
                         tmp[1]);
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
                dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
                         tmp[6]);
 
-               if (!ready)
-                       dev_err(dev, "Lane0/1 not ready\n");
-               if (!aligned)
-                       dev_err(dev, "Lane0/1 not aligned\n");
                return -EAGAIN;
        }
 
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
 static int tc_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
 {
-       /* Accept any mode */
+       /* DPI interface clock limitation: upto 154 MHz */
+       if (mode->clock > 154000)
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
 
index 71d712f1b56a285bac904b0d1e74a7363d766af3..b16f1d69a0bbf345e33e277de5ce73a38974bf73 100644 (file)
@@ -1225,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                return;
 
        for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+               if (!new_crtc_state->active)
                        continue;
 
                ret = drm_crtc_vblank_get(crtc);
index 07374008f146fa946750b1cfd3bf13bfd1b9c47d..e561663344559b065643464cf669d8b84930007f 100644 (file)
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
        if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
                DRM_INFO("Cannot find any crtc or sizes\n");
+
+               /* First time: disable all crtc's.. */
+               if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+                       restore_fbdev_mode(fb_helper);
                return -EAGAIN;
        }
 
index 3c318439a65967366e106b9542ffa757bb540c07..355120865efd14873726e8eae2e1ec6d6fb31b9f 100644 (file)
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                                    int type, unsigned int resolution)
 {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
        if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->type = type;
 
        emulate_monitor_status_change(vgpu);
+       vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
        return 0;
 }
 
index 4427be18e4a93c72eaccae1fc263b044279c5fe8..940cdaaa3f2456009d5b90a5dd5595924324a993 100644 (file)
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
+       ret = intel_gvt_generate_request(workload);
+       if (ret) {
+               gvt_vgpu_err("fail to generate request\n");
+               goto err_unpin_mm;
+       }
+
        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
index 2801d70579d8cd3839ae904386127d4007efd3de..8e331142badbcbad4ceebbd0c0b2e2fa2fb8584c 100644 (file)
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
index a5bed2e71b9260afbe5ee3db7bb6649f29b826a2..44cd5ff5e97dae3282d7f8358675ddbd98c09c07 100644 (file)
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 v = *(u32 *)p_data;
-
-       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-               return intel_vgpu_default_mmio_write(vgpu,
-                               offset, p_data, bytes);
-
-       switch (offset) {
-       case 0x4ddc:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-               break;
-       case 0x42080:
-               /* bypass WaCompressedResourceDisplayNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-               break;
-       case 0xe194:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-               break;
-       case 0x7014:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-       MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+       MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
        MMIO_D(0x45504, D_SKL_PLUS);
        MMIO_D(0x45520, D_SKL_PLUS);
        MMIO_D(0x46000, D_SKL_PLUS);
index f6ded475bb2cc4dec19697b01e1e37c2c015f7dd..3ac1dc97a7a067f7f040a13935d3801c17330565 100644 (file)
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
+       unsigned long flags;
 
        if (!is_gvt_request(req)) {
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
                return NOTIFY_OK;
        }
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
+       workload->shadowed = true;
+       return 0;
+
+err_unpin:
+       engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+       release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+       return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+       int ring_id = workload->ring_id;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+       struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+       int ret;
 
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
-       workload->shadowed = true;
        return 0;
 
 err_unpin:
        engine->context_unpin(engine, shadow_ctx);
-err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
        return ret;
 }
 
index 2d694f6c09076a31efc3165dc074312527450345..b9f872204d7e73fb48cd865641d9feee861fbbcc 100644 (file)
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif
index e2993857df37bb365225f2dcf1797d389ae73fb4..888b7d3f04c303412ce7de74d86f216a9434582d 100644 (file)
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
 
        if (has_transparent_hugepage()) {
                struct super_block *sb = gemfs->mnt_sb;
-               char options[] = "huge=within_size";
+               /* FIXME: Disabled until we get W/A for read BW issue. */
+               char options[] = "huge=never";
                int flags = 0;
                int err;
 
index 7bc60c848940f95d1352ac79b7040867fa6268a3..6c7f8bca574eb4414f9a1d95894a5e10c25882b5 100644 (file)
@@ -1736,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
 {
        return 0;
 }
index b8af35187d226df3273380448aded0f5bc0d7c7c..ea96682568e880077fec27e489b3d345c44953ae 100644 (file)
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 
        /* Due to peculiar init order wrt to hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
-                                        ifbdev->preferred_bpp)) {
+                                        ifbdev->preferred_bpp))
                intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-               intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-       }
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-       if (ifbdev)
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_sync(ifbdev);
+       if (ifbdev->vma)
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index eb5827110d8ffca08cf6b46e6671265a43f40307..49fdf09f9919c8f29d85f83ee0f3f731796f7713 100644 (file)
@@ -438,7 +438,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
        return (i + 1 < num &&
-               !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+               msgs[i].addr == msgs[i + 1].addr &&
+               !(msgs[i].flags & I2C_M_RD) &&
+               (msgs[i].len == 1 || msgs[i].len == 2) &&
                (msgs[i + 1].flags & I2C_M_RD));
 }
 
index 93c7e3f9b4a88d776959be4bf8db22c6c6ac4d97..17d2f3a1c562bcb34e132c855965d1b2ace89726 100644 (file)
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
                        plane_disabling = true;
        }
 
-       if (plane_disabling) {
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+       /*
+        * The flip done wait is only strictly required by imx-drm if a deferred
+        * plane disable is in-flight. As the core requires blocking commits
+        * to wait for the flip it is done here unconditionally. This keeps the
+        * workitem around a bit longer than required for the majority of
+        * non-blocking commits, but we accept that for the sake of simplicity.
+        */
+       drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       if (plane_disabling) {
                for_each_old_plane_in_state(state, plane, old_plane_state, i)
                        ipu_plane_disable_deferred(plane);
 
index c226da145fb3cd62c3bd91ac3070850f0429f474..a349cb61961e03672c0bd23b489c7d9f1ccbbbed 100644 (file)
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
 
 config DRM_OMAP_PANEL_DPI
        tristate "Generic DPI panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DPI panels.
 
index daf286fc8a4082a7fc7f8f8bf6bf38ab74362c09..ca1e3b489540fe8171b7eaa1d6839963a2533e08 100644 (file)
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-       { .family = "OMAP3[456]*" },
-       { .family = "[AD]M37*" },
+       { .machine = "OMAP3[456]*" },
+       { .machine = "[AD]M37*" },
        { /* sentinel */ }
 };
 
index d86873f2abe6a57897dd62f324eefd0322c7a669..e626eddf24d5e2231c2434a76d45ddd29067c6e4 100644 (file)
@@ -352,7 +352,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
 {
        const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
                         CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
-       unsigned int ret;
+       int ret;
 
        core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
                "omap4", caps, CEC_MAX_LOG_ADDRS);
index 62e451162d96f6fe4064c05bf60eaee430d15788..b06f9956e73321352eb048a6b54d54957db42f37 100644 (file)
@@ -886,25 +886,36 @@ struct hdmi4_features {
        bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
        .cts_swmode = false,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
        .cts_swmode = true,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
        .cts_swmode = true,
        .audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-       { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-       { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-       { .family = "OMAP4",                      .data = &hdmi4_es3_features },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES1.?",
+               .data = &hdmi4430_es1_features,
+       },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES2.?",
+               .data = &hdmi4430_es2_features,
+       },
+       {
+               .family = "OMAP4",
+               .data = &hdmi4_features,
+       },
        { /* sentinel */ }
 };
 
index 1dd3dafc59afd25b5048ac105eb7627bafb16c9a..c60a85e82c6d8a529aa40062fdde21354d73b505 100644 (file)
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
                match = of_match_node(dmm_of_match, dev->dev.of_node);
                if (!match) {
                        dev_err(&dev->dev, "failed to find matching device node\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto fail;
                }
 
                omap_dmm->plat_data = match->data;
index 898f9a07883043bb7d7238016b4744a2c95bc1ac..a6511918f632586372a90e430c8ac5128a44b03d 100644 (file)
@@ -5451,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
        WREG32(VM_INVALIDATE_REQUEST, 0x1);
 }
 
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
-       int i;
-       uint32_t sh_mem_bases, sh_mem_config;
-
-       sh_mem_bases = 0x6000 | 0x6000 << 16;
-       sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
-       sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
-       mutex_lock(&rdev->srbm_mutex);
-       for (i = 8; i < 16; i++) {
-               cik_srbm_select(rdev, 0, 0, 0, i);
-               /* CP and shaders */
-               WREG32(SH_MEM_CONFIG, sh_mem_config);
-               WREG32(SH_MEM_APE1_BASE, 1);
-               WREG32(SH_MEM_APE1_LIMIT, 0);
-               WREG32(SH_MEM_BASES, sh_mem_bases);
-       }
-       cik_srbm_select(rdev, 0, 0, 0, 0);
-       mutex_unlock(&rdev->srbm_mutex);
-}
-
 /**
  * cik_pcie_gart_enable - gart enable
  *
@@ -5586,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        cik_srbm_select(rdev, 0, 0, 0, 0);
        mutex_unlock(&rdev->srbm_mutex);
 
-       cik_pcie_init_compute_vmid(rdev);
-
        cik_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
index b15755b6129c2b3aa37e016f5d11f97da6427ba8..b1fe0639227e4bc499fb581ca494217f084a7c6f 100644 (file)
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
                goto err_pllref;
        }
 
-       pm_runtime_enable(dev);
-
        dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
        dsi->dsi_host.dev = dev;
        ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
        }
 
        dev_set_drvdata(dev, dsi);
+       pm_runtime_enable(dev);
        return 0;
 
 err_mipi_dsi_host:
index b0551aa677b82fcb3ca5e26c72e6d612bee9447a..8d7172e8381d2685cc2bdedd4c631d83536df3a8 100644 (file)
@@ -1062,7 +1062,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
        unsigned i, j;
@@ -1133,7 +1132,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
        ttm_pool_unpopulate(&tt->ttm);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
index 5f11dc014ed619918afd11c9e7ac79fc66d77d96..e5234f953a6d16213920df252ac8ef23d857f924 100644 (file)
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
 #define JC42_REG_TEMP          0x05
 #define JC42_REG_MANID         0x06
 #define JC42_REG_DEVICEID      0x07
+#define JC42_REG_SMBUS         0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT    15
@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
 #define GT_MANID               0x1c68  /* Giantec */
 #define GT_MANID2              0x132d  /* Giantec, 2nd mfg ID */
 
+/* SMBUS register */
+#define SMBUS_STMOUT           BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        data->extended = !!(cap & JC42_CAP_RANGE);
 
+       if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+               int smbus;
+
+               /*
+                * Not all chips support this register, but from a
+                * quick read of various datasheets no chip appears
+                * incompatible with the below attempt to disable
+                * the timeout. And the whole thing is opt-in...
+                */
+               smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+               if (smbus < 0)
+                       return smbus;
+               i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+                                            smbus | SMBUS_STMOUT);
+       }
+
        config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
        if (config < 0)
                return config;
index 52a58b8b6e1bd002f6b91b17e1c06cda94a1069b..a139940cd991a39544feb1cc8f529a7c92e4bba8 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
 static long pmbus_reg2data_direct(struct pmbus_data *data,
                                  struct pmbus_sensor *sensor)
 {
-       long val = (s16) sensor->data;
-       long m, b, R;
+       s64 b, val = (s16)sensor->data;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val = div_s64(val + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return (val - b) / m;
+       val = div_s64(val - b, m);
+       return clamp_val(val, LONG_MIN, LONG_MAX);
 }
 
 /*
@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                                 struct pmbus_sensor *sensor, long val)
 {
-       long m, b, R;
+       s64 b, val64 = val;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                R -= 3;         /* Adjust R and b for data in milli-units */
                b *= 1000;
        }
-       val = val * m + b;
+       val64 = val64 * m + b;
 
        while (R > 0) {
-               val *= 10;
+               val64 *= 10;
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val64 = div_s64(val64 + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return val;
+       return (u16)clamp_val(val64, S16_MIN, S16_MAX);
 }
 
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
index 9e12a53ef7b8cf2cdccf9de473af8e2cec9c5f36..8eac00efadc1ad8f8e477094e26790a1ec317117 100644 (file)
@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        /* Default timeout in interrupt mode: 200 ms */
        priv->adapter.timeout = HZ / 5;
 
+       if (dev->irq == IRQ_NOTCONNECTED)
+               priv->features &= ~FEATURE_IRQ;
+
        if (priv->features & FEATURE_IRQ) {
                u16 pcictl, pcists;
 
index 31186ead5a40717dc491753eb1ebd37f7a1b0e52..509a6007cdf659129917d4dd156983509bc3353b 100644 (file)
@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        property_entries_dup(info->properties);
                        if (IS_ERR(devinfo->board_info.properties)) {
                                status = PTR_ERR(devinfo->board_info.properties);
+                               kfree(devinfo);
                                break;
                        }
                }
@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        GFP_KERNEL);
                        if (!devinfo->board_info.resources) {
                                status = -ENOMEM;
+                               kfree(devinfo);
                                break;
                        }
                }
index 21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42..130606c3b07c15f03e5481b1cf22831a7c9a8e85 100644 (file)
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        sg_list_start = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages(cur_base,
+               ret = get_user_pages_longterm(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     gup_flags, page_list, vma_list);
index a27d85232ce1343ce802576ab3584ed67be6f35d..a0cc1bc6d88445a3ced1e5dcd1906100b5e619c6 100644 (file)
@@ -490,7 +490,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                if (b == -1)
                        goto err;
 
-               k->ptr[i] = PTR(ca->buckets[b].gen,
+               k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                bucket_to_sector(c, b),
                                ca->sb.nr_this_dev);
 
index 11c5503d31dc3029df2cde14f8f9e9fc48514bc2..81e8dc3dbe5e30604438ca0658d39900cde7e2c8 100644 (file)
@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
        c->shrink.scan_objects = bch_mca_scan;
        c->shrink.seeks = 4;
        c->shrink.batch = c->btree_pages * 2;
-       register_shrinker(&c->shrink);
+
+       if (register_shrinker(&c->shrink))
+               pr_warn("bcache: %s: could not register shrinker",
+                               __func__);
 
        return 0;
 }
index 41c238fc37338073632c017d705143b62e344e6f..f9d391711595fb87c479251fbf382459174d7e76 100644 (file)
@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
                return false;
 
        for (i = 0; i < KEY_PTRS(l); i++)
-               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+               if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                        return false;
 
index 02a98ddb592d3b7aaaf0f12cc31b6a5937988ffe..a87165c1d8e5262d01962eb706b60ff9fd02cb78 100644 (file)
@@ -170,6 +170,11 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
+                       /*
+                        * We must try the index l with ZERO first for
+                        * correctness due to the scenario that the journal
+                        * bucket is circular buffer which might have wrapped
+                        */
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
                        if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)
                        continue;
 
                ja->cur_idx = next;
-               k->ptr[n++] = PTR(0,
+               k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }
index 3a7aed7282b2a0227e9f04cec6d82404bad55f23..643c3021624faa1fc6af0b84bdf49d07135d6238 100644 (file)
@@ -708,16 +708,15 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        /*
-        * If cache device is dirty (dc->has_dirty is non-zero), then
-        * recovery a failed read request from cached device may get a
-        * stale data back. So read failure recovery is only permitted
-        * when cache device is clean.
+        * If read request hit dirty data (s->read_dirty_data is true),
+        * then recovery a failed read request from cached device may
+        * get a stale data back. So read failure recovery is only
+        * permitted when read request hit clean data in cache device,
+        * or when cache read race happened.
         */
-       if (s->recoverable &&
-           (dc && !atomic_read(&dc->has_dirty))) {
+       if (s->recoverable && !s->read_dirty_data) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
index 0b5c43f7e020da59c939369ebd9d0a27a116ce2b..f412429cf5ba586958a3693a35eee48ad7043c51 100644 (file)
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
-       err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+       err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
                             flags, dma->pages, NULL);
 
        if (err != dma->nr_pages) {
                dma->nr_pages = (err >= 0) ? err : 0;
-               dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+               dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+                       dma->nr_pages);
                return err < 0 ? err : -EINVAL;
        }
        return 0;
index bb7fd3f4edab7f4ce4a45854dade29a0b113ae14..19969ee86d6f781c64f2acf632781a600c647c0d 100644 (file)
@@ -2083,6 +2083,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
        /* There should only be one entry, but go through the list
         * anyway
         */
+       if (afu->phb == NULL)
+               return result;
+
        list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                if (!afu_dev->driver)
                        continue;
@@ -2124,8 +2127,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
                         * Tell the AFU drivers; but we don't care what they
                         * say, we're going away.
                         */
-                       if (afu->phb != NULL)
-                               cxl_vphb_error_detected(afu, state);
+                       cxl_vphb_error_detected(afu, state);
                }
                return PCI_ERS_RESULT_DISCONNECT;
        }
@@ -2265,6 +2267,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
                if (cxl_afu_select_best_mode(afu))
                        goto err;
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        /* Reset the device context.
                         * TODO: make this less disruptive
@@ -2327,6 +2332,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
        for (i = 0; i < adapter->slices; i++) {
                afu = adapter->afu[i];
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        if (afu_dev->driver && afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->resume)
index e0b4b36ef01052ea1dd368b33e2170be83bda714..305a7a464d091614978e37b26a0573f66a27e8b8 100644 (file)
@@ -425,7 +425,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
        memset(msg, 0, sizeof(msg));
        msg[0].addr = client->addr;
        msg[0].buf = addrbuf;
-       addrbuf[0] = 0x90 + offset;
+       /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+       addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
        msg[0].len = 1;
        msg[1].addr = client->addr;
        msg[1].flags = I2C_M_RD;
@@ -568,6 +569,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
        if (unlikely(!count))
                return count;
 
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
+
        client = at24_translate_offset(at24, &off);
 
        ret = pm_runtime_get_sync(&client->dev);
@@ -613,6 +617,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
        if (unlikely(!count))
                return -EINVAL;
 
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
+
        client = at24_translate_offset(at24, &off);
 
        ret = pm_runtime_get_sync(&client->dev);
@@ -730,6 +737,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
                dev_warn(&client->dev,
                        "page_size looks suspicious (no power of 2)!\n");
 
+       /*
+        * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+        * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+        *
+        * Eventually we'll get rid of the magic values altoghether in favor of
+        * real structs, but for now just manually set the right size.
+        */
+       if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+               chip.byte_len = 6;
+
        /* Use I2C operations unless we're stuck with SMBus extensions. */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                if (chip.flags & AT24_FLAG_ADDR16)
index ea80ff4cd7f99bc27aab2951ea8d3f9d67624912..ccfa98af1dd3fc93fdaa52b6ced6db3dc0f9db79 100644 (file)
@@ -122,6 +122,10 @@ struct mmc_blk_data {
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
+
+       /* debugfs files (only in main mmc_blk_data) */
+       struct dentry *status_dentry;
+       struct dentry *ext_csd_dentry;
 };
 
 /* Device type for RPMB character devices */
@@ -233,9 +237,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
 
        /* Dispatch locking to the block layer */
        req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               count = PTR_ERR(req);
+               goto out_put;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
 
        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
@@ -248,7 +257,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
                                set_disk_ro(part_md->disk, 1);
                        }
        }
-
+out_put:
        mmc_blk_put(md);
        return count;
 }
@@ -624,6 +633,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_done;
+       }
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
@@ -691,6 +704,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_err;
+       }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
@@ -2550,6 +2567,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
 
        /* Ask the block layer about the card status */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2557,6 +2576,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
                *val = ret;
                ret = 0;
        }
+       blk_put_request(req);
 
        return ret;
 }
@@ -2583,10 +2603,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
 
        /* Ask the block layer for the EXT CSD */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out_free;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(mq->queue, NULL, req, 0);
        err = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
        if (err) {
                pr_err("FAILED %d\n", err);
                goto out_free;
@@ -2632,7 +2657,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
        .llseek         = default_llseek,
 };
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        struct dentry *root;
 
@@ -2642,28 +2667,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
        root = card->debugfs_root;
 
        if (mmc_card_mmc(card) || mmc_card_sd(card)) {
-               if (!debugfs_create_file("status", S_IRUSR, root, card,
-                                        &mmc_dbg_card_status_fops))
+               md->status_dentry =
+                       debugfs_create_file("status", S_IRUSR, root, card,
+                                           &mmc_dbg_card_status_fops);
+               if (!md->status_dentry)
                        return -EIO;
        }
 
        if (mmc_card_mmc(card)) {
-               if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
-                                        &mmc_dbg_ext_csd_fops))
+               md->ext_csd_dentry =
+                       debugfs_create_file("ext_csd", S_IRUSR, root, card,
+                                           &mmc_dbg_ext_csd_fops);
+               if (!md->ext_csd_dentry)
                        return -EIO;
        }
 
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+       if (!card->debugfs_root)
+               return;
+
+       if (!IS_ERR_OR_NULL(md->status_dentry)) {
+               debugfs_remove(md->status_dentry);
+               md->status_dentry = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+               debugfs_remove(md->ext_csd_dentry);
+               md->ext_csd_dentry = NULL;
+       }
+}
 
 #else
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
 static int mmc_blk_probe(struct mmc_card *card)
@@ -2703,7 +2753,7 @@ static int mmc_blk_probe(struct mmc_card *card)
        }
 
        /* Add two debugfs entries */
-       mmc_blk_add_debugfs(card);
+       mmc_blk_add_debugfs(card, md);
 
        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
        pm_runtime_use_autosuspend(&card->dev);
@@ -2729,6 +2779,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
+       mmc_blk_remove_debugfs(card, md);
        mmc_blk_remove_parts(card, md);
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);
index a4b49e25fe963b135d71c0532ce0bf5c8951a3cd..7586ff2ad1f17274f764a62d3622f9c8d2209c0b 100644 (file)
@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
                return ret;
 
        ret = host->bus_ops->suspend(host);
+       if (ret)
+               pm_generic_resume(dev);
+
        return ret;
 }
 
index 01e459a34f3321046498110a6815a126b27a8a5e..0f4a7d7b26261486e6ff43bea47d756ad5870843 100644 (file)
@@ -314,4 +314,5 @@ err:
 void mmc_remove_card_debugfs(struct mmc_card *card)
 {
        debugfs_remove_recursive(card->debugfs_root);
+       card->debugfs_root = NULL;
 }
index a552f61060d2127d2539f73abf0f3690829c0898..d209fb466979015d3d668beab293e60b9657c0fc 100644 (file)
@@ -781,7 +781,7 @@ MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
        card->ext_csd.device_life_time_est_typ_a,
        card->ext_csd.device_life_time_est_typ_b);
@@ -791,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
index 45bf78f327163e009d6dc2abd15b0c69ca06071c..62b84dd8f9fe3467d4c69c925c0082efb1231a22 100644 (file)
@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
index 3fb7d2eec93f4d72d63dbc5983e47a2b8dbc710d..c283291db705238dab53ee0104808a198aa4288e 100644 (file)
@@ -29,6 +29,9 @@
 #define CORE_VERSION_MAJOR_MASK                (0xf << CORE_VERSION_MAJOR_SHIFT)
 #define CORE_VERSION_MINOR_MASK                0xff
 
+#define CORE_MCI_GENERICS              0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE   BIT(29)
+
 #define CORE_HC_MODE           0x78
 #define HC_MODE_EN             0x1
 #define CORE_POWER             0x0
@@ -1028,11 +1031,22 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        bool done = false;
+       u32 val;
 
        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                        mmc_hostname(host->mmc), __func__, req_type,
                        msm_host->curr_pwr_state, msm_host->curr_io_level);
 
+       /*
+        * The power interrupt will not be generated for signal voltage
+        * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+        */
+       val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+       if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+           !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
+               return;
+       }
+
        /*
         * The IRQ for request type IO High/LOW will be generated when -
         * there is a state change in 1.8V enable bit (bit 3) of
index 2f14334e42df91135fc88fdc474c0d318c4dfad3..e9290a3439d54c2a82ffd2533fc61e4d54637296 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host)
 
        spin_lock_init(&host->lock);
 
+       /*
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
+        */
+       mmc->max_req_size = 524288;
+
        /*
         * Maximum number of segments. Depends on if the hardware
         * can do scatter/gather or not.
         */
-       if (host->flags & SDHCI_USE_ADMA)
+       if (host->flags & SDHCI_USE_ADMA) {
                mmc->max_segs = SDHCI_MAX_SEGS;
-       else if (host->flags & SDHCI_USE_SDMA)
+       } else if (host->flags & SDHCI_USE_SDMA) {
                mmc->max_segs = 1;
-       else /* PIO */
+               if (swiotlb_max_segment()) {
+                       unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+                                               IO_TLB_SEGSIZE;
+                       mmc->max_req_size = min(mmc->max_req_size,
+                                               max_req_size);
+               }
+       } else { /* PIO */
                mmc->max_segs = SDHCI_MAX_SEGS;
-
-       /*
-        * Maximum number of sectors in one transfer. Limited by SDMA boundary
-        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
-        * is less anyway.
-        */
-       mmc->max_req_size = 524288;
+       }
 
        /*
         * Maximum segment size. Could be one segment with the maximum number
index a13a4896a8bddad19ae48f8c58bbaf2f3c8dce84..0626dcfd1f3d83ceaad91968cffd65370189cfac 100644 (file)
  * Below is some version info we got:
  *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
  *                                Filter? connected?  Passive detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX25  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
- *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
index b4efd711f824ccd1c832af8817e09bf2e00b2b5c..788c3464a3b0e95aaa101591750b9de493a34a18 100644 (file)
@@ -825,7 +825,10 @@ err_release_regions:
 err_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 /* free the board structure object, as well as its resources: */
index 131026fbc2d77cbc3ccb5903daa10f8920f8ae17..5adc95c922eef2d9f968a2dea3bac7c2dd3bfda2 100644 (file)
@@ -717,7 +717,10 @@ failure_release_regions:
 failure_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 static void peak_pci_remove(struct pci_dev *pdev)
index 4d4941469cfc06bfff3aeafaa0c3562b63702730..db6ea936dc3fc3fca00c939b2db6a938cf5011dc 100644 (file)
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
                mbx_mask = hecc_read(priv, HECC_CANMIM);
                mbx_mask |= HECC_TX_MBOX_MASK;
                hecc_write(priv, HECC_CANMIM, mbx_mask);
+       } else {
+               /* repoll is done only if whole budget is used */
+               num_pkts = quota;
        }
 
        return num_pkts;
index 9b18d96ef52633ab34bb5ff39f4f643023dc308a..f95945915d209df8353645c6390702d3e67d694d 100644 (file)
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                        }
 
                        if (pos + tmp->len > actual_len) {
-                               dev_err(dev->udev->dev.parent,
-                                       "Format error\n");
+                               dev_err_ratelimited(dev->udev->dev.parent,
+                                                   "Format error\n");
                                break;
                        }
 
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
        if (err) {
                netdev_err(netdev, "Error transmitting URB\n");
                usb_unanchor_urb(urb);
+               kfree(buf);
                usb_free_urb(urb);
                return err;
        }
@@ -1333,7 +1334,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                goto resubmit_urb;
        }
 
-       while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+       while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
                msg = urb->transfer_buffer + pos;
 
                /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1353,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                }
 
                if (pos + msg->len > urb->actual_length) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       dev_err_ratelimited(dev->udev->dev.parent,
+                                           "Format error\n");
                        break;
                }
 
@@ -1768,6 +1770,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
                usb_unanchor_urb(urb);
+               kfree(buf);
 
                stats->tx_dropped++;
 
index 7f0272558befe9ecdeaf110d2b95f8754cea8939..ef417dcddbf74a59dffff3b82fbff55c13277174 100644 (file)
@@ -592,6 +592,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
        case -ESHUTDOWN:
                return;
 
@@ -862,7 +863,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
                goto cleanup_unregister_candev;
        }
 
-       dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
+       dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
 
        return 0;
 
index ea01f24f15e77f4b9765d3bed76ce71527e39dad..b62d47210db8d1e1af522b4ddf1ed073e0977991 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/mii.h>
index b721a2009b5030f440bed9eab8ed4f7003ae25a4..23b45da784cb601a7abf84b212717aee7dc64403 100644 (file)
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
                                slice_num, false);
        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-                               slice_num, true);
+                               SLICE_NUM_MASK, true);
 
        /* Insert into TCAM now because we need to insert a second rule */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Insert into Action and policer RAMs now, set chain ID to
         * the one we are chained to
         */
-       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
                                      queue_num, true);
        if (ret)
                goto out_err;
index 8171055fde7a0238fb2fbc691a482c211d4d8d5b..66d33e97cbc5426b71395f878bc4e648dc3f5182 100644 (file)
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
        u16 mask;
 
        mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
        free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
        return 0;
 
 out_disable:
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
 out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
        { },
 };
 
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+       struct mii_bus *bus;
+
+       list_for_each_entry(mdio_bus, &chip->mdios, list) {
+               bus = mdio_bus->bus;
+
+               mdiobus_unregister(bus);
+       }
+}
+
 static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                                    struct device_node *np)
 {
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                match = of_match_node(mv88e6xxx_mdio_external_match, child);
                if (match) {
                        err = mv88e6xxx_mdio_register(chip, child, true);
-                       if (err)
+                       if (err) {
+                               mv88e6xxx_mdios_unregister(chip);
                                return err;
+                       }
                }
        }
 
        return 0;
 }
 
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
-       struct mv88e6xxx_mdio_bus *mdio_bus;
-       struct mii_bus *bus;
-
-       list_for_each_entry(mdio_bus, &chip->mdios, list) {
-               bus = mdio_bus->bus;
-
-               mdiobus_unregister(bus);
-       }
-}
-
 static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
index e278e3d96ee010235ec4d21696a996b68cb3ef18..c6163874e4e7e136fb953aff74eab4fff401cb75 100644 (file)
@@ -220,9 +220,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
 
                /* RMII TX/RX needs always a rate of 25MHz */
                err = clk_set_rate(priv->macclk, 25000000);
-               if (err)
+               if (err) {
                        dev_err(dev,
                                "failed to change mac clock rate (%d)\n", err);
+                       goto out_clk_disable_macclk;
+               }
        }
 
        err = arc_emac_probe(ndev, interface);
@@ -232,7 +234,8 @@ static int emac_rockchip_probe(struct platform_device *pdev)
        }
 
        return 0;
-
+out_clk_disable_macclk:
+       clk_disable_unprepare(priv->macclk);
 out_regulator_disable:
        if (priv->regulator)
                regulator_disable(priv->regulator);
index c5c38d4b7d1ccd04044f972777c5d3844e755a02..61ca4eb7c6fa983165fc0308b22f2e29fbf30ef6 100644 (file)
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                         * here forever if we consistently cannot allocate
                         * buffers.
                         */
-                       else if (rc == -ENOMEM)
+                       else if (rc == -ENOMEM && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 
                        rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
-                       if (likely(rc == -EIO))
+                       if (likely(rc == -EIO) && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+       struct hwrm_short_input short_input = {0};
 
        req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
        memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
-               struct hwrm_short_input short_input = {0};
 
                memcpy(short_cmd_req, req, msg_len);
                memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
        if (netif_running(dev))
                dev_close(dev);
 
+       bnxt_ulp_shutdown(bp);
+
        if (system_state == SYSTEM_POWER_OFF) {
-               bnxt_ulp_shutdown(bp);
                bnxt_clear_int_mode(bp);
                pci_wake_from_d3(pdev, bp->wol);
                pci_set_power_state(pdev, PCI_D3hot);
index d5031f436f8341ac98d7b1074f22bec9e107245d..3d201d7324bdc7b2c50377e5da5b3ab3acb8a423 100644 (file)
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 {
        int ifindex = tcf_mirred_ifindex(tc_act);
        struct net_device *dev;
-       u16 dst_fid;
 
        dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
        if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
                return -EINVAL;
        }
 
-       /* find the FID from dev */
-       dst_fid = bnxt_flow_get_dst_fid(bp, dev);
-       if (dst_fid == BNXT_FID_INVALID) {
-               netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
-               return -EINVAL;
-       }
-
        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
-       actions->dst_fid = dst_fid;
        actions->dst_dev = dev;
        return 0;
 }
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
        if (rc)
                return rc;
 
-       /* Tunnel encap/decap action must be accompanied by a redirect action */
-       if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
-            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
-           !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
-               netdev_info(bp->dev,
-                           "error: no redir action along with encap/decap");
-               return -EINVAL;
+       if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+               if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+                       /* dst_fid is PF's fid */
+                       actions->dst_fid = bp->pf.fw_fid;
+               } else {
+                       /* find the FID from dst_dev */
+                       actions->dst_fid =
+                               bnxt_flow_get_dst_fid(bp, actions->dst_dev);
+                       if (actions->dst_fid == BNXT_FID_INVALID)
+                               return -EINVAL;
+               }
        }
 
        return rc;
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
        }
 
        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
-               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
-                          CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
-               ether_addr_copy(req.src_macaddr, l2_info->smac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
 
 static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
-                                      struct bnxt_tc_l2_key *l2_info,
-                                      struct net_device *real_dst_dev)
+                                      struct bnxt_tc_l2_key *l2_info)
 {
 #ifdef CONFIG_INET
+       struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;
 
-       decap_key->ttl = tun_key.ttl;
        decap_l2_info = &decap_node->l2_info;
+       /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
-       ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;
 
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
-                                        flow->actions.dst_dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;
 
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
        return 0;
 }
 
+static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+                               u16 src_fid)
+{
+       if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+               flow->src_fid = bp->pf.fw_fid;
+       else
+               flow->src_fid = src_fid;
+}
+
 /* Add a new flow or replace an existing flow.
  * Notes on locking:
  * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;
-       flow->src_fid = src_fid;
+
+       bnxt_tc_set_src_fid(bp, flow, src_fid);
 
        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -ENOSPC;
index 6aa0eee88ea529963850828fc1ab46eb36d75095..a5eecd895a8253d753bea0fb273da0bf49005d13 100644 (file)
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
-                                       err_msg_was_printed[core] = true;
+                               err_msg_was_printed[core] = true;
                        }
                }
 
index 8b2c31e2a2b0281d6ca8c70bbf3a520bdd15eb31..a3d12dbde95b6d71634c8502c6eb3509be057049 100644 (file)
@@ -1355,6 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (ip.v4->version == 4)
+                       hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
index 5be52d89b182ec4d1c46cc692a0e446d6925c618..7f837006bb6adf04a844d08a2a88d0957f704443 100644 (file)
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
 
        gfar_init_addr_hash_table(priv);
 
-       /* Insert receive time stamps into padding alignment bytes */
+       /* Insert receive time stamps into padding alignment bytes, and
+        * plus 2 bytes padding to ensure the cpu alignment.
+        */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
-               priv->padding = 8;
+               priv->padding = 8 + DEFAULT_PADDING;
 
        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
                GFAR_SUPPORTED_GBIT : 0;
        phy_interface_t interface;
        struct phy_device *phydev;
+       struct ethtool_eee edata;
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
        /* Add support for flow control, but don't advertise it by default */
        phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
 
+       /* disable EEE autoneg, EEE not supported by eTSEC */
+       memset(&edata, 0, sizeof(struct ethtool_eee));
+       phy_ethtool_set_eee(phydev, &edata);
+
        return 0;
 }
 
index c9798210fa0f6cef39aec36ed5c64fe6e805d7dc..0495487f7b42e7e80d416a2212fad2d8ca786f71 100644 (file)
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
        } else if (dev->err_interrupt == -EPROBE_DEFER) {
-               return -EPROBE_DEFER;
+               ret = -EPROBE_DEFER;
+               goto out_mdio;
        }
 
        if (pdev->dev.of_node)
index fed2b2f909fcf06b98441863ac92ff63a2fd01f7..634b2f41cc9e43ef66d2cf2c33e393221b92af23 100644 (file)
@@ -85,7 +85,7 @@
 
 /* RSS Registers */
 #define MVPP22_RSS_INDEX                       0x1500
-#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  ((idx) << 8)
+#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  (idx)
 #define     MVPP22_RSS_INDEX_TABLE(idx)                ((idx) << 8)
 #define     MVPP22_RSS_INDEX_QUEUE(idx)                ((idx) << 16)
 #define MVPP22_RSS_TABLE_ENTRY                 0x1508
index 924a05e05da027523e7845728c6ddc4d41accc0b..78b36c67c232f661d5c2633034c5ada015bf1aeb 100644 (file)
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
 {
        u8 __iomem *mem = port->eth_stats;
 
-       /* TX and RX stats are flipped as we are returning the stats as seen
-        * at the switch port corresponding to the phys port.
-        */
-       stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
-       stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
-       stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+       stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+       stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+       stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
 
-       stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
-       stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
-       stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+       stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+       stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+       stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
 }
 
 static void
index 71bee1af71effaea4359f765df82c4654ca96567..df21e900f874036bfaf349fa334acf6c339acf3c 100644 (file)
@@ -195,6 +195,7 @@ err2:
 err1:
        rmnet_unregister_real_device(real_dev, port);
 err0:
+       kfree(ep);
        return err;
 }
 
index 29842ccc91a9d35ff49a0e5ac1cb61ec61fb3b49..08e4afc0ab39b42ff8585a466daf64d9f25fb703 100644 (file)
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 
        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
-                       return RMNET_MAP_CONSUMED;
+                       goto fail;
        }
 
        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
        if (!map_header)
-               return RMNET_MAP_CONSUMED;
+               goto fail;
 
        if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
                if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        skb->protocol = htons(ETH_P_MAP);
 
        return RMNET_MAP_SUCCESS;
+
+fail:
+       kfree_skb(skb);
+       return RMNET_MAP_CONSUMED;
 }
 
 static void
index 7e060aa9fbed4057c2a00f389f4e411923acb6f0..db72d13cebb9e52c3e07ac9de09670b3f547af4b 100644 (file)
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
                           entry, le32_to_cpu(txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                       dma_unmap_single(&mdp->pdev->dev,
+                                        le32_to_cpu(txdesc->addr),
                                         le32_to_cpu(txdesc->len) >> 16,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
                        if (mdp->rx_skbuff[i]) {
                                struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
 
-                               dma_unmap_single(&ndev->dev,
+                               dma_unmap_single(&mdp->pdev->dev,
                                                 le32_to_cpu(rxdesc->addr),
                                                 ALIGN(mdp->rx_buf_sz, 32),
                                                 DMA_FROM_DEVICE);
                        }
                }
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
                sh_eth_tx_free(ndev, false);
 
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
 
                /* The size of the buffer is a multiple of 32 bytes. */
                buf_len = ALIGN(mdp->rx_buf_sz, 32);
-               dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+               dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
                                          DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+               if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-       mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
+                                         &mdp->rx_desc_dma, GFP_KERNEL);
        if (!mdp->rx_ring)
                goto ring_free;
 
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-       mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
+                                         &mdp->tx_desc_dma, GFP_KERNEL);
        if (!mdp->tx_ring)
                goto ring_free;
        return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_unmap_single(&ndev->dev, dma_addr,
+                       dma_unmap_single(&mdp->pdev->dev, dma_addr,
                                         ALIGN(mdp->rx_buf_sz, 32),
                                         DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
                                                  buf_len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                                kfree_skb(skb);
                                break;
                        }
@@ -2441,9 +2442,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* soft swap. */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
-       dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
index 0ea7e16f2e6e2c6d8106308e73327390e62074ce..9937a2450e573f8028046233cf82de2f51f16704 100644 (file)
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        }
 
        if (buffer->flags & EFX_TX_BUF_SKB) {
+               EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
+       unsigned int bytes_compl = 0;
+       unsigned int pkts_compl = 0;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
-               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
 }
 
index f63c2ddced3c9a1e90f4425b28be95d202379a71..d7250539d0bd0c61c92fc9460c9e1197bb57ac8f 100644 (file)
@@ -2588,6 +2588,7 @@ static int stmmac_open(struct net_device *dev)
 
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+       priv->mss = 0;
 
        ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
index 11c1e7950fe58002b1b2b52e6af395dbfc7b6863..77cc4fbaeace4836419b2232913f8d78351e0148 100644 (file)
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
+               .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };
index fdb43dd9b5cd424f4dde02f1257070ffe4b50fb1..ab4614113403455c1eee1c2ad69c7cebc6da5c9d 100644 (file)
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
        return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+/* Center KSZ9031RNX FLP timing at 16ms. */
 static int ksz9031_center_flp_timing(struct phy_device *phydev)
 {
        int result;
 
-       /* Center KSZ9031RNX FLP timing at 16ms. */
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+       if (result)
+               return result;
+
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
-
        if (result)
                return result;
 
index e3bbc70372d3ba73517514c1eda6a861d35d9524..5dc9668dde34fe6b810c48f2bcd63e8609caa74e 100644 (file)
@@ -773,6 +773,7 @@ void phylink_stop(struct phylink *pl)
                sfp_upstream_stop(pl->sfp_bus);
 
        set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
index e381811e5f1143f35432e6624e80c00b13f0b56e..9dfc1c4c954f3230c7f6419ac2c59ad85b26c1c4 100644 (file)
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
 {
        unsigned int los = sfp->state & SFP_F_LOS;
 
-       /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
-        * SFP_OPTIONS_LOS_NORMAL are set?  For now, we assume
-        * the same as SFP_OPTIONS_LOS_NORMAL set.
+       /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+        * are set, we assume that no LOS signal is available.
         */
-       if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
+       if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
                los ^= SFP_F_LOS;
+       else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+               los = 0;
 
        if (los)
                sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
                sfp_sm_link_up(sfp);
 }
 
+static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_LOW) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_HIGH);
+}
+
+static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_HIGH) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_LOW);
+}
+
 static void sfp_sm_fault(struct sfp *sfp, bool warn)
 {
        if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
                return -EINVAL;
        }
 
+       /* If the module requires address swap mode, warn about it */
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+               dev_warn(sfp->dev,
+                        "module address swap to access page 0xA2 is not supported.\n");
+
        return sfp_module_insert(sfp->sfp_bus, &sfp->id);
 }
 
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
        case SFP_S_WAIT_LOS:
                if (event == SFP_E_TX_FAULT)
                        sfp_sm_fault(sfp, true);
-               else if (event ==
-                        (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                         SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
+               else if (sfp_los_event_inactive(sfp, event))
                        sfp_sm_link_up(sfp);
                break;
 
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
                if (event == SFP_E_TX_FAULT) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_fault(sfp, true);
-               } else if (event ==
-                          (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                           SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
+               } else if (sfp_los_event_active(sfp, event)) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
                }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
 {
        /* locking... and check module is present */
 
-       if (sfp->id.ext.sff8472_compliance) {
+       if (sfp->id.ext.sff8472_compliance &&
+           !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
                modinfo->type = ETH_MODULE_SFF_8472;
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        } else {
index e9489b88407ce1677385fe480592958b57d02c8d..0a886fda01291efb5a6beb0a2b5eb2123c1f05ab 100644 (file)
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
        DEFINE_WAIT(wait);
        ssize_t ret = 0;
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (skb)
                goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
 {
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       struct sk_buff *skb = m->msg_control;
        int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               if (skb)
+                       kfree_skb(skb);
                return -EINVAL;
-       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       }
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
index 95749006d687b971a49894c903fcc611bc25c375..4f4a842a1c9cb8ac3397b329854a0fc7bd2f6aa3 100644 (file)
@@ -1952,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (!skb) {
                /* Read frames from ring */
@@ -2069,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = tun_get(tfile);
+       struct sk_buff *skb = m->msg_control;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
+       if (!tun) {
+               ret = -EBADFD;
+               goto out_free_skb;
+       }
 
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
-               goto out;
+               goto out_put_tun;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2092,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 out:
        tun_put(tun);
        return ret;
+
+out_put_tun:
+       tun_put(tun);
+out_free_skb:
+       if (skb)
+               kfree_skb(skb);
+       return ret;
 }
 
 static int tun_peek_len(struct socket *sock)
index c750cf7c042b004ecfbbce64aefb3d0f1d512c82..304ec6555cd88b3b058b944f6120df3294adb1ca 100644 (file)
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
                net->hard_header_len = 0;
                net->addr_len        = 0;
                net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+               set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: raw IP\n");
        } else if (!net->header_ops) { /* don't bother if already set */
                ether_setup(net);
+               clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: Ethernet\n");
        }
 
index 80348b6a864668d0b7535906084bf64f967a1448..d56fe32bf48dea8c617c011d5bd6ddc8d9d5270f 100644 (file)
@@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                return -ENOLINK;
        }
 
-       skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+       if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+               skb = __netdev_alloc_skb(dev->net, size, flags);
+       else
+               skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
index d6dff347f8962e04208306c0ffea90956470cddd..78ebe494fef02b8d31505262f8c551e27aee7dc5 100644 (file)
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
-               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
index 25da74d310d1bbd5e7c62f9a35de94f6279fbc25..f837d666cbd499c8e33a1514f55344a1796005a1 100644 (file)
@@ -1449,19 +1449,19 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
        int srcu_idx, ret;
        u8 data[16] = { 0, };
 
+       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+       if (unlikely(!ns))
+               return -EWOULDBLOCK;
+
        put_unaligned_le64(key, &data[0]);
        put_unaligned_le64(sa_key, &data[8]);
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = op;
-       c.common.nsid = cpu_to_le32(head->ns_id);
+       c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-       if (unlikely(!ns))
-               ret = -EWOULDBLOCK;
-       else
-               ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+       ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
        nvme_put_ns_from_disk(head, srcu_idx);
        return ret;
 }
@@ -2961,8 +2961,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       struct nvme_ns_head *head = ns->head;
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -2980,15 +2978,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        nvme_mpath_clear_current_path(ns);
-       if (head)
-               list_del_rcu(&ns->siblings);
+       list_del_rcu(&ns->siblings);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
        mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-       synchronize_srcu(&head->srcu);
+       synchronize_srcu(&ns->head->srcu);
        nvme_put_ns(ns);
 }
 
index 42232e731f19f71d31e5808af8513ed73bbf7ee8..9ba614953607eba072000fb10c2854ea1ee677d6 100644 (file)
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+               struct request *rq)
+{
+       struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+       /*
+        * We cannot accept any other command until the connect command has
+        * completed, so only allow connect to pass.
+        */
+       if (!blk_rq_is_passthrough(rq) ||
+           cmd->common.opcode != nvme_fabrics_command ||
+           cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+               /*
+                * Reconnecting state means transport disruption, which can take
+                * a long time and even might fail permanently, fail fast to
+                * give upper layers a chance to failover.
+                * Deleting state means that the ctrl will never accept commands
+                * again, fail it permanently.
+                */
+               if (ctrl->state == NVME_CTRL_RECONNECTING ||
+                   ctrl->state == NVME_CTRL_DELETING) {
+                       nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+                       return BLK_STS_IOERR;
+               }
+               return BLK_STS_RESOURCE; /* try again later */
+       }
+
+       return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
index 7ab0be55c7d063b31f1a9525a2961308b0d8a274..0a8af4daef8903f8ba983d345f1044498c57a975 100644 (file)
@@ -31,7 +31,8 @@
 
 
 enum nvme_fc_queue_flags {
-       NVME_FC_Q_CONNECTED = (1 << 0),
+       NVME_FC_Q_CONNECTED = 0,
+       NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY     3               /* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
        if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
                return;
 
+       clear_bit(NVME_FC_Q_LIVE, &queue->flags);
        /*
         * Current implementation never disconnects a single queue.
         * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        break;
+
+               set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return ret;
@@ -2320,6 +2323,14 @@ busy:
        return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        u32 data_len;
        blk_status_t ret;
 
+       ret = nvme_fc_is_ready(queue, rq);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
                return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_disconnect_admin_queue;
 
+       set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
        /*
         * Check controller capabilities
         *
index 78d92151a9042b6190514e074575821bda684e7f..1218a9fca8466b874e45f97d47b9c70234916ec7 100644 (file)
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                bio->bi_opf |= REQ_NVME_MPATH;
                ret = direct_make_request(bio);
        } else if (!list_empty_careful(&head->list)) {
-               dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+               dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
                spin_lock_irq(&head->requeue_lock);
                bio_list_add(&head->requeue_list, bio);
index c0873a68872fb188228f52ac44355761a82ab721..ea1aa5283e8ed9215537594a33a243d47b363e25 100644 (file)
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT                2000
+#define NVME_QUIRK_DELAY_AMOUNT                2300
 
 enum nvme_ctrl_state {
        NVME_CTRL_NEW,
index a11cfd470089226cffd01c9c6104afdc876c341a..f5800c3c9082a6f038c129327bccd22f5eb861fe 100644 (file)
@@ -1759,6 +1759,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
                        dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
                        dev->host_mem_descs, dev->host_mem_descs_dma);
        dev->host_mem_descs = NULL;
+       dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1788,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
        if (!bufs)
                goto out_free_descs;
 
-       for (size = 0; size < preferred; size += len) {
+       for (size = 0; size < preferred && i < max_entries; size += len) {
                dma_addr_t dma_addr;
 
                len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2429,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
        if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
                /*
@@ -2443,6 +2444,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
                    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
                     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
                        return NVME_QUIRK_NO_DEEPEST_PS;
+       } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+               /*
+                * Samsung SSD 960 EVO drops off the PCIe bus after system
+                * suspend on a Ryzen board, ASUS PRIME B350M-A.
+                */
+               if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+                   dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+                       return NVME_QUIRK_NO_APST;
        }
 
        return 0;
@@ -2482,7 +2491,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto unmap;
 
-       quirks |= check_dell_samsung_bug(pdev);
+       quirks |= check_vendor_combination_bug(pdev);
 
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                        quirks);
@@ -2665,6 +2674,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+       { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
index 4f9bf2f815c399f3f7f39d5b6d485dbe75a2466f..37af56596be6ce8a0339ce2a3151f8dd98f8f854 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
@@ -59,6 +60,9 @@ struct nvme_rdma_request {
        struct nvme_request     req;
        struct ib_mr            *mr;
        struct nvme_rdma_qe     sqe;
+       union nvme_result       result;
+       __le16                  status;
+       refcount_t              ref;
        struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
        u32                     num_sge;
        int                     nents;
@@ -73,11 +77,11 @@ struct nvme_rdma_request {
 enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_ALLOCATED           = 0,
        NVME_RDMA_Q_LIVE                = 1,
+       NVME_RDMA_Q_TR_READY            = 2,
 };
 
 struct nvme_rdma_queue {
        struct nvme_rdma_qe     *rsp_ring;
-       atomic_t                sig_count;
        int                     queue_size;
        size_t                  cmnd_capsule_len;
        struct nvme_rdma_ctrl   *ctrl;
@@ -258,32 +262,6 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
        return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-       struct nvme_rdma_ctrl *ctrl = data;
-       struct nvme_rdma_device *dev = ctrl->device;
-       struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       int ret = 0;
-
-       if (WARN_ON_ONCE(!req->mr))
-               return 0;
-
-       ib_dereg_mr(req->mr);
-
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               req->mr = NULL;
-               goto out;
-       }
-
-       req->mr->need_inval = false;
-
-out:
-       return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx)
 {
@@ -293,9 +271,6 @@ static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
 
-       if (req->mr)
-               ib_dereg_mr(req->mr);
-
        nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
 }
@@ -317,21 +292,9 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
        if (ret)
                return ret;
 
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               goto out_free_qe;
-       }
-
        req->queue = queue;
 
        return 0;
-
-out_free_qe:
-       nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -428,10 +391,23 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev = queue->device;
-       struct ib_device *ibdev = dev->dev;
+       struct nvme_rdma_device *dev;
+       struct ib_device *ibdev;
 
-       rdma_destroy_qp(queue->cm_id);
+       if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+               return;
+
+       dev = queue->device;
+       ibdev = dev->dev;
+
+       ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+       /*
+        * The cm_id object might have been destroyed during RDMA connection
+        * establishment error flow to avoid getting other cma events, thus
+        * the destruction of the QP shouldn't use rdma_cm API.
+        */
+       ib_destroy_qp(queue->qp);
        ib_free_cq(queue->ib_cq);
 
        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
@@ -440,6 +416,12 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
        nvme_rdma_dev_put(dev);
 }
 
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+       return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                    ibdev->attrs.max_fast_reg_page_list_len);
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
        struct ib_device *ibdev;
@@ -482,8 +464,24 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
                goto out_destroy_qp;
        }
 
+       ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+                             queue->queue_size,
+                             IB_MR_TYPE_MEM_REG,
+                             nvme_rdma_get_max_fr_pages(ibdev));
+       if (ret) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "failed to initialize MR pool sized %d for QID %d\n",
+                       queue->queue_size, idx);
+               goto out_destroy_ring;
+       }
+
+       set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
        return 0;
 
+out_destroy_ring:
+       nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+                           sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
        rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
@@ -510,7 +508,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
                queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
        queue->queue_size = queue_size;
-       atomic_set(&queue->sig_count, 0);
 
        queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
                        RDMA_PS_TCP, IB_QPT_RC);
@@ -546,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 
 out_destroy_cm_id:
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        return ret;
 }
 
@@ -756,8 +754,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->device = ctrl->queues[0].device;
 
-       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+       ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
@@ -771,10 +768,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        error = PTR_ERR(ctrl->ctrl.admin_q);
                        goto out_free_tagset;
                }
-       } else {
-               error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-               if (error)
-                       goto out_free_queue;
        }
 
        error = nvme_rdma_start_queue(ctrl, 0);
@@ -854,10 +847,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
                        goto out_free_tag_set;
                }
        } else {
-               ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-               if (ret)
-                       goto out_free_io_queues;
-
                blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);
        }
@@ -1018,8 +1007,18 @@ static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
 
 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
+       struct nvme_rdma_request *req =
+               container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
+
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
                nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+               return;
+       }
+
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
+
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1030,7 +1029,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
-               .send_flags         = 0,
+               .send_flags         = IB_SEND_SIGNALED,
                .ex.invalidate_rkey = req->mr->rkey,
        };
 
@@ -1044,22 +1043,15 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
-       int res;
 
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
-               res = nvme_rdma_inv_rkey(queue, req);
-               if (unlikely(res < 0)) {
-                       dev_err(ctrl->ctrl.device,
-                               "Queueing INV WR for rkey %#x failed (%d)\n",
-                               req->mr->rkey, res);
-                       nvme_rdma_error_recovery(queue->ctrl);
-               }
+       if (req->mr) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
@@ -1118,12 +1110,18 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
 
+       req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+       if (WARN_ON_ONCE(!req->mr))
+               return -EAGAIN;
+
        /*
         * Align the MR to a 4K page size to match the ctrl page size and
         * the block virtual boundary.
         */
        nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
        if (unlikely(nr < count)) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
                if (nr < 0)
                        return nr;
                return -EINVAL;
@@ -1142,8 +1140,6 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->mr->need_inval = true;
-
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
        put_unaligned_le32(req->mr->rkey, sg->key);
@@ -1163,7 +1159,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->mr->need_inval = false;
+       refcount_set(&req->ref, 2); /* send and recv completions */
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1200,25 +1196,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
-               nvme_rdma_wr_error(cq, wc, "SEND");
-}
+       struct nvme_rdma_qe *qe =
+               container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+       struct nvme_rdma_request *req =
+               container_of(qe, struct nvme_rdma_request, sqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
 
-/*
- * We want to signal completion at least every queue depth/2.  This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-       int limit = 1 << ilog2((queue->queue_size + 1) / 2);
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
+               nvme_rdma_wr_error(cq, wc, "SEND");
+               return;
+       }
 
-       return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-               struct ib_send_wr *first, bool flush)
+               struct ib_send_wr *first)
 {
        struct ib_send_wr wr, *bad_wr;
        int ret;
@@ -1227,31 +1222,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
        sge->length = sizeof(struct nvme_command),
        sge->lkey   = queue->device->pd->local_dma_lkey;
 
-       qe->cqe.done = nvme_rdma_send_done;
-
        wr.next       = NULL;
        wr.wr_cqe     = &qe->cqe;
        wr.sg_list    = sge;
        wr.num_sge    = num_sge;
        wr.opcode     = IB_WR_SEND;
-       wr.send_flags = 0;
-
-       /*
-        * Unsignalled send completions are another giant desaster in the
-        * IB Verbs spec:  If we don't regularly post signalled sends
-        * the send queue will fill up and only a QP reset will rescue us.
-        * Would have been way to obvious to handle this in hardware or
-        * at least the RDMA stack..
-        *
-        * Always signal the flushes. The magic request used for the flush
-        * sequencer is not allocated in our driver's tagset and it's
-        * triggered to be freed by blk_cleanup_queue(). So we need to
-        * always mark it as signaled to ensure that the "wr_cqe", which is
-        * embedded in request's payload, is not freed when __ib_process_cq()
-        * calls wr_cqe->done().
-        */
-       if (nvme_rdma_queue_sig_limit(queue) || flush)
-               wr.send_flags |= IB_SEND_SIGNALED;
+       wr.send_flags = IB_SEND_SIGNALED;
 
        if (first)
                first->next = &wr;
@@ -1301,6 +1277,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
        return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       if (unlikely(wc->status != IB_WC_SUCCESS))
+               nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1319,10 +1301,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
        cmd->common.flags |= NVME_CMD_SGL_METABUF;
        nvme_rdma_set_sg_null(cmd);
 
+       sqe->cqe.done = nvme_rdma_async_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
                        DMA_TO_DEVICE);
 
-       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
        WARN_ON_ONCE(ret);
 }
 
@@ -1343,14 +1327,34 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
        }
        req = blk_mq_rq_to_pdu(rq);
 
-       if (rq->tag == tag)
-               ret = 1;
+       req->status = cqe->status;
+       req->result = cqe->result;
 
-       if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-           wc->ex.invalidate_rkey == req->mr->rkey)
-               req->mr->need_inval = false;
+       if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+               if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Bogus remote invalidation for rkey %#x\n",
+                               req->mr->rkey);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+       } else if (req->mr) {
+               ret = nvme_rdma_inv_rkey(queue, req);
+               if (unlikely(ret < 0)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Queueing INV WR for rkey %#x failed (%d)\n",
+                               req->mr->rkey, ret);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+               /* the local invalidation completion will end the request */
+               return 0;
+       }
+
+       if (refcount_dec_and_test(&req->ref)) {
+               if (rq->tag == tag)
+                       ret = 1;
+               nvme_end_request(rq, req->status, req->result);
+       }
 
-       nvme_end_request(rq, cqe->status, cqe->result);
        return ret;
 }
 
@@ -1591,31 +1595,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
-       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-               if (!blk_rq_is_passthrough(rq) ||
-                   cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-                       /*
-                        * reconnecting state means transport disruption, which
-                        * can take a long time and even might fail permanently,
-                        * fail fast to give upper layers a chance to failover.
-                        * deleting state means that the ctrl will never accept
-                        * commands again, fail it permanently.
-                        */
-                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-                           queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-                               nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-                               return BLK_STS_IOERR;
-                       }
-                       return BLK_STS_RESOURCE; /* try again later */
-               }
-       }
-
-       return 0;
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1627,14 +1611,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_qe *sqe = &req->sqe;
        struct nvme_command *c = sqe->data;
-       bool flush = false;
        struct ib_device *dev;
        blk_status_t ret;
        int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       ret = nvme_rdma_queue_is_ready(queue, rq);
+       ret = nvme_rdma_is_ready(queue, rq);
        if (unlikely(ret))
                return ret;
 
@@ -1656,13 +1639,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                goto err;
        }
 
+       sqe->cqe.done = nvme_rdma_send_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-       if (req_op(rq) == REQ_OP_FLUSH)
-               flush = true;
        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr ? &req->reg_wr.wr : NULL);
        if (unlikely(err)) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1810,7 +1793,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_delete_ctrl,
        .get_address            = nvmf_get_address,
-       .reinit_request         = nvme_rdma_reinit_request,
 };
 
 static inline bool
index 664d3013f68f3484980da8cf1c9a93f0b49f4265..5fd86039e35362b01e6f1615c4fb835c2e019728 100644 (file)
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
        tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+       /* release the queue lookup reference on the completed IO */
+       nvmet_fc_tgt_q_put(queue);
+
        spin_lock_irqsave(&queue->qlock, flags);
        deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
                                struct nvmet_fc_defer_fcp_req, req_list);
        if (!deferfcp) {
                list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
                spin_unlock_irqrestore(&queue->qlock, flags);
-
-               /* Release reference taken at queue lookup and fod allocation */
-               nvmet_fc_tgt_q_put(queue);
                return;
        }
 
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
                                deferfcp->fcp_req);
 
+               /* release the queue lookup reference */
+               nvmet_fc_tgt_q_put(queue);
+
                kfree(deferfcp);
 
                spin_lock_irqsave(&queue->qlock, flags);
index 96d390416789b41e2a6259a52851c3267cc1d4d9..1e21b286f299834298fb9d23f06f9f20e0797157 100644 (file)
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+       NVME_LOOP_Q_LIVE        = 0,
+};
+
 struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
+       unsigned long           flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;
 
+       ret = nvme_loop_is_ready(queue, req);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
        int i;
 
-       for (i = 1; i < ctrl->ctrl.queue_count; i++)
+       for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+               clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+       }
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
+               set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        if (error)
                goto out_cleanup_queue;
 
+       set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
index 0f1ff081349395bfadec0e8034c9acad7e7b4846..66e008f7adb6c890a43199168dd118a814489797 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index c95a4784c1911ef1540450f9fc6c2ffee97fcee5..e7cd28ff1984460540f70fd203326b08f2d5c8f9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 98fb28e49d2c0605c5ea3e2062d4ad790788d504..f035c2f25d35a8c5aa12cb0552aaf3484edb2798 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.c
index 8eafcd5fa0049ed9d3384aa6a8999fcec4b61ba2..1a41ef49633875a08ba2021ffa729a39881ec102 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 6168ccdb389c37bb686196f49d68f0780b8fd010..a6b132f7e869eb4eb804b3fa8407cd064c92b699 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
index 7abb240847c07dd0b24f3f2e7f03d221a1416f5f..6aaefb78043696e658e36b6637b4e6dde59b5c83 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dcssblk.c -- the S/390 block driver for dcss memory
  *
index eb51893c74a4ba4053fe8d15e064fbf42bed9845..b4130c7880d874862f14eeb381f36c472b231a0d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Block driver for s390 storage class memory.
  *
index 571a0709e1e5b98ba14708d13e9f944e5ad85a6a..2a6334ca750efdf68f818df0af4b08ac66b8df78 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Xpram.c -- the S/390 expanded memory RAM-disk
  *           
index c4518168fd02c98013b349e17fdba30c8d65eec5..61822480a2a0bdfa808e4f9d3e19965857e7700d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - fullscreen driver.
  *
index 251a318a9b7541452c0142f0f0f8ce84167b8dc2..1447d08872253e3498914fb6da6c3504cd207f47 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    HMC Drive DVD Module
  *
index 027ac6ae5eea512c530a9afbb87bb31ad2bedd8e..bf4ab4efed7355dd88007c7bfc304f5251530e9a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for reading z/VM *MONITOR service records.
  *
index 571a7e3527553ad905612007b860197ca4105b5a..76c158c41510374ac4b814aca55587193c8b8fe7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for writing z/VM *MONITOR service records.
  *
index 5d4f053d7c38c330d969586fa3eae0b40f6955ca..f8cd2935fbfd48c5aef1ad980457cc55433b6db4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - core functions.
  *
index 19c25427f27fdd702864153fe64f71abb2a175b0..ee6f3b563728319ba5c3d4964f05843453e3ce99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Enable Asynchronous Notification via SCLP.
  *
index de69f0ddc321dedbb7270ae9fcdf75afe1d148fc..6d73ee3f827a6ca401b0eaa5e4f66b2e6e1766e5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3480/3490 tapes.
  *
index e352047ed9f7a8d6d8ec0a70a688c1efadbfdf9d..37e65a05517f50606f73db539e0871e76452d142 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3590 tapes.
  *
index e7d23048d3f00d0ea1d2a59bf1128d38f4cb6d1d..a07102472ce97eba06a526dcb56d5690b9be2fd4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2004
  *
index 32503a60ee851698049c2fc1221ce01c581ebb6a..8d3370da2dfc294e1286caa337bd9d305fb624c5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    basic function of the tape device driver
  *
index e417ccd9e299891560b2b2c1e67565f0eb46df47..1c98023cffd4165a8ad5117c907fa9258d918c4f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    IBM/3270 Driver - tty functions.
  *
index 62559dc0169f8c9f32a4677e946e0e88880ae17f..069b9ef08206b1bc7168bdbfd4dd3de2ba026e6c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *     character device driver for reading z/VM system service records
  *
index fa90ef05afc00c32805238c4dccdb1a5c589390d..52aa894243187484c03bf301d274990cdbeacb32 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux driver for System z and s390 unit record devices
  * (z/VM virtual punch, reader, printer)
index aaed778f67c4ab84bf13dc9435260a5bdae361dc..4369662cfff5a7ad094d522590901bc845933872 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * zcore module to export memory content and register sets for creating system
  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
@@ -7,7 +8,6 @@
  *
  * Copyright IBM Corp. 2003, 2008
  * Author(s): Michael Holzheu
- * License: GPL
  */
 
 #define KMSG_COMPONENT "zdump"
index e2f7b6e93efddf85dd45457d70ce3c3bc3602ba3..bfec1485ca2332ac5bfe8adf2e7c6c50307a3c97 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  bus driver for ccwgroup
  *
index f4166f80c4d4e4408c97e8742a7275b90464898d..5c94a3aec4dd293dfdce50e33219037564ebf118 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 1999, 2010
  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
index 7b0b295b2313b8f3056378ed64c4249381a0213b..c08fc5a8df0c61935c02e282a6ec868d2d0e7630 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- channel subsystem call
  *
index 8e7e19b9e92c028e1097fe9ff82386203594bc37..0015729d917d90e049a1def14cba883ec2c31cc3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 chsc subchannels
  *
index 89216174fcbba8f83d3a0a79a633f6e1cb91ad88..987bf9a8c9f7237d06c578e42807a6e168ef7dfd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- low level i/o calls
  *
index 7d59230e88bb3a2452e8e4eaf667f25659d3d13d..5e495c62cfa7749aef468cc137d0d5eab0c959c7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Linux on zSeries Channel Measurement Facility support
  *
@@ -7,20 +8,6 @@
  *         Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "cio"
index d3e504c3c362655f4eec5903893d8bc8f8e5af2c..0f11dce6e2240c14151ab690fd28e1a39c0694a2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * driver for channel subsystem
  *
@@ -5,8 +6,6 @@
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index 318d8269f5dee10c56114224b4a08b8b617a96da..75a245f38e2eb7558b9da624a1e2f190cd77b8c2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *  bus driver for ccw devices
  *
@@ -5,8 +6,6 @@
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *              Cornelia Huck (cornelia.huck@de.ibm.com)
  *              Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index dd7d79d30edc440662a02432a3ad3ce822503225..1319122e9d1231920ef0325a9e56a4c6de91ff80 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * finite state machine for device handling
  *
index cf8c4ac6323a6d1c91dfe93dfdb22e2d9d0432b3..1caf6a398760bb1f156f5c088759f12e6039e589 100644 (file)
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * Copyright IBM Corp. 2002, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 #include <linux/export.h>
 #include <linux/init.h>
index ce16e4f45d440fd25538d9223db5e07566ad22f5..53468ae64b999fa17bc154bb7eeda57293aed2da 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 eadm subchannels
  *
index c592087be0f1a6b0b8083dce448e278c2388c7c9..77fde9f5ea8baeb55ff403096358d20ff7974c5e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Functions for registration of I/O interruption subclasses on s390.
  *
index ed4852fab44b5737fa5edae05ddd640067486304..59b4a3370cd5d454cadc9e145bcb3a2dc9a321df 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux for s390 qdio support, buffer handling, qdio API and module support.
  *
index 9ae1380cbc31300f5e251f03e6027ad903b2d666..98f3cfdc0d027dd0c0e7bcd8d05be86c0fc3a09d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * qdio queue initialization
  *
index 1fa53ecdc2aaa2ec1a81b7bf65b5d0dcf32a16c2..6bca1d5455d4f6ce1997d39d09792a90e65511a0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Recognize and maintain s390 storage class memory.
  *
index 82f05c4b8c526f73a52aed819eacb33cdfd18e3a..ea6a2d0b2894decac95c3421c544183ee89c3383 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * VFIO based Physical Subchannel device driver
  *
index faeba9db3d95999526fdf2ab0667751cd82ab1e0..48d55dc9e98648738b78f3dbc311ea3e141573cd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "ap"
index 7e45c4d08cad40e9124913abd79b715312cbd990..e0827eaa42f1dda711ed08fbf8d61dc96b65630f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _AP_BUS_H_
index 8dda5bb34a2f2710c6d0f8fc40b291d3e848d99e..e7c2e4f9529ac6bab55a8df8f854c8ed64442cdc 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  pkey device driver
  *
  *  Copyright IBM Corp. 2017
  *  Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "pkey"
index b5f4006198b9e0d977b04c2c08d7626c48056569..ce15f101ee282701cdf55ac0ddba557001c737fc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -218,8 +205,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
        weight += atomic_read(&zq->load);
        pref_weight += atomic_read(&pref_zq->load);
        if (weight == pref_weight)
-               return &zq->queue->total_request_count >
-                       &pref_zq->queue->total_request_count;
+               return zq->queue->total_request_count >
+                       pref_zq->queue->total_request_count;
        return weight > pref_weight;
 }
 
index 73541a798db7a4a1c41bd0e555f33bd1a1bbbbde..9fff8912f6e3b05bd7b2c76a529e5a99d6aec863 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_API_H_
index f85dacf1c28442a38dac33978a20d412ceaf8c42..233e1e695208b9b870edb4259127c41e2ee3357a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index 12cff6262566b5f4c1960b5497e37da7025ad7ad..011d61d8a4ae5869e7d41d1654a218c5f53f96bc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CCA_KEY_H_
index b97c5d5ee5a4aba9e70f88f791b674046092a6f7..e701194d36115c06d4435df2b1dcc4dbc9cc103e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 0dce4b9af184114ecdc13bf3741ab167dfb402d8..c3c116777c937cd4fafbe974f765cd5725327f27 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CEX2A_H_
index e2eebc775a37a0e80ced5520cb5446d85e33fb23..f305538334adb14f0dd296fd2466cb478a2ba3ba 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012
  *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
index 13df60209ed33a05604e34e381d40a50b1c40904..01598d83c60a0a1c478c1ade86ea603feb05eab0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_ERROR_H_
index db5bde47dfb0d17b49a8dad9eeec8279c2a6e226..afe1b2bcd7ecf5e211712b567ca186f3a32e4a57 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 5cc280318ee705e9e64fe035b9ccb32c8aa82a40..0a36545cfb8eeb09da1f2136ba31adde5b54775d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE50_H_
index 785620d3050433e33a2af975595d6969e067e45e..f54bef4a928e90b34e7158d97f1d1db998617bf3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 7a0d5b57821f07868c9af78da9873c1eca997c2e..d314f4525518b63693d70df42e768a8dc9e8af33 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE6_H_
index 600604782b65e972705d01984568134949e61039..159b0a0dd211b9a561cad4d326c5d481b1a8418d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index eacafc8962f204377d2c95f8d9a5bfd032d16eb2..d678a3af83a7baa9dda7b1dfdcba632014b975a1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_PCIXCC_H_
index 4742be0eec24f8ca87787105eada3974ab61e4ab..720434e18007e3a8e1c9e5228c4841ba0cb782a5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index be9f172185310ac081d6a44f0e061868bc5885d3..7ce98b70cad38bf55be1fd4a15bdaa62761ff159 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2001, 2009
  * Author(s):
index c81adf8042d780a5a19b8a10e4e2a6e2e05f6de6..eb07862bd36a03f5d043ee1fb492461695a506ee 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /**
  * A generic FSM based on fsm used in isdn4linux
  *
index e131a03262ad7bcb3041a9e1e98da574930c727b..92ae84a927fcf391abebaccbd907d5d962ed9bed 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Linux for S/390 Lan Channel Station Network Driver
  *
@@ -7,20 +8,6 @@
  *            Rewritten by
  *                     Frank Pavlic <fpavlic@de.ibm.com> and
  *                     Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT         "lcs"
index b9c7c1e61da296f743f7bbd6f5d30e43d5940117..5ce2424ca7290397e43b55c66581071424da99b9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV network driver
  *
  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define KMSG_COMPONENT "netiucv"
index 9cd569ef43ecfbaaf10680b3ac091267c1d1577f..15015a24f8ad750d2c107bf278442340cef801fd 100644 (file)
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_recover_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
index 49b9efeba1bda1e2390289b8ba536fa7bad0542c..430e3214f7e26791af247d402734efb0ebb9cfc3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -6438,6 +6444,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       /* GSO segmentation builds skbs with
+        *      a (small) linear part for the headers, and
+        *      page frags for the data.
+        * Compared to a linear skb, the header-only part consumes an
+        * additional buffer element. This reduces buffer utilization, and
+        * hurts throughput. So compress small segments into one element.
+        */
+       if (netif_needs_gso(skb, features)) {
+               /* match skb_segment(): */
+               unsigned int doffset = skb->data - skb_mac_header(skb);
+               unsigned int hsize = skb_shinfo(skb)->gso_size;
+               unsigned int hroom = skb_headroom(skb);
+
+               /* linearize only if resulting skb allocations are order-0: */
+               if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                       features &= ~NETIF_F_SG;
+       }
+
+       return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
        int rc;
index b22ed2a57acd94661c97b77966246f0785c77ae2..ae81534de91228910fd877fce0e1e262cc24fddf 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index d2537c09126d676a77eee6d2bbacb54030c2f657..5863ea170ff26447630ed22acd5174db23bf0ee2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -960,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_stop               = qeth_l2_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -1010,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                card->dev->hw_features = NETIF_F_SG;
                card->dev->vlan_features = NETIF_F_SG;
+               card->dev->features |= NETIF_F_SG;
                /* OSA 3S and earlier has no RX/TX support */
                if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
                        card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1028,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
index aadd384316a375f15506cada437a818f4a621239..6a73894b0cb51d2896bea853f6a33113fed98c13 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -1376,6 +1377,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
                if (ipm) {
@@ -2917,6 +2919,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_stop               = qeth_l3_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2957,6 +2960,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
+                               card->dev->features |= NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2984,8 +2988,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        netif_keep_dst(card->dev);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
+       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+                                         PAGE_SIZE);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
index a851d34c642b5d26866fafdde925eb48ddf61003..3b0c8b8a7634d18df62ece8f94936ed39666a2af 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV special message driver
  *
  * Copyright IBM Corp. 2003, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 32515a201bbc65c4a0c6e9d48282a188ff16cdda..0a263999f7ae44b181ac7dc786908698c42a9974 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Deliver z/VM CP special messages (SMSG) as uevents.
  *
index 84752152d41fd682c5ae350ddb4bd3ac80d47cde..a3a8c8d9d7171a8d6994548212084ff3380ba493 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * zfcp device driver
  *
index f68af1f317f15460d489c9b8324ebc4d06142ca9..2dc4d9aab634592363138cb69a07f6885fb25438 100644 (file)
@@ -1,9 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kvm guest drivers on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
index b18fe2014cf2195a193186c08c956dc8e5cfe7e3..ba2e0856d22cdfb5396457366276e01bc9ac7851 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ccw based virtio transport
  *
  * Copyright IBM Corp. 2012, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 
index 539a26444f3181279df5803a26c1e2e59c165067..7d49d4865298a304bff9dc11d9a1684621aead10 100644 (file)
@@ -71,16 +71,12 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
        }
 
        sock_filp = sock_alloc_file(sock, 0, NULL);
-       if (IS_ERR(sock_filp)) {
-               sock_release(sock);
-               rc = PTR_ERR(sock_filp);
-               goto out;
-       }
+       if (IS_ERR(sock_filp))
+               return PTR_ERR(sock_filp);
 
        rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
 
        fput(sock_filp);
-out:
        return rc;
 }
 
index 8d626d7c2e7e79db8d243278e805c96ad563bb3d..c7bdeb6556469efb93e2a6a7e742da3a37ad7e69 100644 (file)
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        goto out;
-               if (nvq->rx_array)
-                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-               /* On overrun, truncate and discard */
-               if (unlikely(headcount > UIO_MAXIOV)) {
-                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(sock, &msg,
-                                                1, MSG_DONTWAIT | MSG_TRUNC);
-                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
-                       continue;
-               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               if (nvq->rx_array)
+                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+                       err = sock->ops->recvmsg(sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* We don't need to be notified again. */
                iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                fixup = msg.msg_iter;
index 48230a5e12f262b67d28d87adc713f462e8ec5fc..bf7ff3934d7fff5169e5252cd8fc0a29ea25a133 100644 (file)
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
        /* device_register() causes the bus infrastructure to look for a
         * matching driver. */
        err = device_register(&dev->dev);
+       if (err)
+               ida_simple_remove(&virtio_index_ida, dev->index);
 out:
        if (err)
                virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
index 7960746f759788d545b9e85e384a56cbf99a7606..a1fb52cb3f0ab5c0f066d3d773a82c73665f54da 100644 (file)
@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
        while ((page = balloon_page_pop(&pages))) {
                balloon_page_enqueue(&vb->vb_dev_info, page);
 
-               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
-
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
+               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
        }
 
        num_allocated_pages = vb->num_pfns;
index e03910cebdd490321d1a4db5681715907ca92042..804d1f905622075ab27feecf7f2c959be9c1ac01 100644 (file)
@@ -441,7 +441,10 @@ enum afs_lock_state {
 };
 
 /*
- * AFS inode private data
+ * AFS inode private data.
+ *
+ * Note that afs_alloc_inode() *must* reset anything that could incorrectly
+ * leak from one inode to another.
  */
 struct afs_vnode {
        struct inode            vfs_inode;      /* the VFS's inode record */
index 2b00097101b37bdfcf8fd5e4780245ddf8f09c39..b88b7d45fdaa029dc1239be8b295fe60dfba1f1c 100644 (file)
@@ -120,7 +120,7 @@ static void afs_hash_permits(struct afs_permits *permits)
 void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                      unsigned int cb_break)
 {
-       struct afs_permits *permits, *xpermits, *replacement, *new = NULL;
+       struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL;
        afs_access_t caller_access = READ_ONCE(vnode->status.caller_access);
        size_t size = 0;
        bool changed = false;
@@ -204,7 +204,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
        new = kzalloc(sizeof(struct afs_permits) +
                      sizeof(struct afs_permit) * size, GFP_NOFS);
        if (!new)
-               return;
+               goto out_put;
 
        refcount_set(&new->usage, 1);
        new->nr_permits = size;
@@ -229,8 +229,6 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
 
        afs_hash_permits(new);
 
-       afs_put_permits(permits);
-
        /* Now see if the permit list we want is actually already available */
        spin_lock(&afs_permits_lock);
 
@@ -262,11 +260,15 @@ found:
        kfree(new);
 
        spin_lock(&vnode->lock);
-       if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break) ||
-           permits != rcu_access_pointer(vnode->permit_cache))
-               goto someone_else_changed_it_unlock;
-       rcu_assign_pointer(vnode->permit_cache, replacement);
+       zap = rcu_access_pointer(vnode->permit_cache);
+       if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) &&
+           zap == permits)
+               rcu_assign_pointer(vnode->permit_cache, replacement);
+       else
+               zap = replacement;
        spin_unlock(&vnode->lock);
+       afs_put_permits(zap);
+out_put:
        afs_put_permits(permits);
        return;
 
index d3f97da61bdfc6b006b88a92a21237dcea333b62..1037dd41a62210a3568c5a5144ffdc97273c20c3 100644 (file)
@@ -536,7 +536,9 @@ static void afs_kill_super(struct super_block *sb)
 }
 
 /*
- * initialise an inode cache slab element prior to any use
+ * Initialise an inode cache slab element prior to any use.  Note that
+ * afs_alloc_inode() *must* reset anything that could incorrectly leak from one
+ * inode to another.
  */
 static void afs_i_init_once(void *_vnode)
 {
@@ -568,11 +570,21 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 
        atomic_inc(&afs_count_active_inodes);
 
+       /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
 
        vnode->volume           = NULL;
+       vnode->lock_key         = NULL;
+       vnode->permit_cache     = NULL;
+       vnode->cb_interest      = NULL;
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->cache            = NULL;
+#endif
+
        vnode->flags            = 1 << AFS_VNODE_UNSET;
+       vnode->cb_type          = 0;
+       vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
index d79ced9258614010128dd8f1ed6a0cff2b525f21..82e8f6edfb48d0e8670dd58e3fbdcfb4b5ceb85d 100644 (file)
@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
                pr_debug("waiting for mount name=%pd\n", path->dentry);
                status = autofs4_wait(sbi, path, NFY_MOUNT);
                pr_debug("mount wait done status=%d\n", status);
-               ino->last_used = jiffies;
        }
+       ino->last_used = jiffies;
        return status;
 }
 
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
         */
        if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
                struct dentry *parent = dentry->d_parent;
+               struct autofs_info *ino;
                struct dentry *new;
 
                new = d_lookup(parent, &dentry->d_name);
                if (!new)
                        return NULL;
-               if (new == dentry)
-                       dput(new);
-               else {
-                       struct autofs_info *ino;
-
-                       ino = autofs4_dentry_ino(new);
-                       ino->last_used = jiffies;
-                       dput(path->dentry);
-                       path->dentry = new;
-               }
+               ino = autofs4_dentry_ino(new);
+               ino->last_used = jiffies;
+               dput(path->dentry);
+               path->dentry = new;
        }
        return path->dentry;
 }
index 95981591977a04d08f300c0795fcd96a4211adc1..78b72c48374e5eed09587292f3b7eee62059e18b 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -627,7 +627,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 
                        if (pfn != pmd_pfn(*pmdp))
                                goto unlock_pmd;
-                       if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+                       if (!pmd_dirty(*pmdp)
+                                       && !pmd_access_permitted(*pmdp, WRITE))
                                goto unlock_pmd;
 
                        flush_cache_page(vma, address, pfn);
index 1d6243d9f2b653e679165099be9332776805b8bd..6be2aa0ab26fe26cb37032b99bba656f8d7c6b51 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
-                * needing to clean up the change on failure.
+                * races from other threads changing the limits. This also
+                * must be protected from races with prlimit() calls.
                 */
+               task_lock(current->group_leader);
                if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
                        current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
+               if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
+                       current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
+               task_unlock(current->group_leader);
        }
 
        arch_pick_mmap_layout(current->mm);
index 016c46b5e44c9770ff55ba432f5bc2fe19133440..20a0a89eaca589de58d70d89c9625ca9a30d0143 100644 (file)
@@ -779,7 +779,7 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
-       int new_rdonly;
+       bool new_rdonly;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
 
index 1e76730aac0deb99df8e39165d959063da93225d..8a85f3f53446521991550583a0f106dd7af042c7 100644 (file)
@@ -639,11 +639,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
                /*
-                * page_put due to reference from alloc_huge_page()
                 * unlock_page because locked by add_to_page_cache()
+                * page_put due to reference from alloc_huge_page()
                 */
-               put_page(page);
                unlock_page(page);
+               put_page(page);
        }
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
index d818fd23678700bf8435af2fa64ed62dfdba4d2c..b8b8b9ced9f81c47a1f76da3ef61c2f9d4645c78 100644 (file)
@@ -269,6 +269,9 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
 
+       /* Unlikely, but not impossible */
+       if (unlikely(cache->c_entry_count < 0))
+               return 0;
        return cache->c_entry_count;
 }
 
index f0c7a7b9b6ca7562217746369cd8d5d82d43a99f..9cc91fb7f156541bd53243b35c2823bbf9ca1133 100644 (file)
@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
         * of the daemon to instantiate them before they can be used.
         */
        if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                          LOOKUP_OPEN | LOOKUP_CREATE |
-                          LOOKUP_AUTOMOUNT))) {
-               /* Positive dentry that isn't meant to trigger an
-                * automount, EISDIR will allow it to be used,
-                * otherwise there's no mount here "now" so return
-                * ENOENT.
-                */
-               if (path->dentry->d_inode)
-                       return -EISDIR;
-               else
-                       return -ENOENT;
-       }
+                          LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
 
        if (path->dentry->d_sb->s_user_ns != &init_user_ns)
                return -EACCES;
index 54fd56d715a8c34a20e0dae0054bfee16eb830bf..e4f4a09ed9f49afd5841852ee82647b3dc286e13 100644 (file)
@@ -71,8 +71,8 @@ const nfs4_stateid zero_stateid = {
 };
 const nfs4_stateid invalid_stateid = {
        {
-               .seqid = cpu_to_be32(0xffffffffU),
-               .other = { 0 },
+               /* Funky initialiser keeps older gcc versions happy */
+               .data = { 0xff, 0xff, 0xff, 0xff, 0 },
        },
        .type = NFS4_INVALID_STATEID_TYPE,
 };
index 39f1b0b0c76fbb24cec8b2388cbcfc11dfb0ae4b..020c597ef9b6e66a74f786d70302238d32dc729a 100644 (file)
@@ -941,12 +941,13 @@ static int dqinit_needed(struct inode *inode, int type)
 }
 
 /* This routine is guarded by s_umount semaphore */
-static void add_dquot_ref(struct super_block *sb, int type)
+static int add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
 #ifdef CONFIG_QUOTA_DEBUG
        int reserved = 0;
 #endif
+       int err = 0;
 
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -966,7 +967,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        reserved = 1;
 #endif
                iput(old_inode);
-               __dquot_initialize(inode, type);
+               err = __dquot_initialize(inode, type);
+               if (err) {
+                       iput(inode);
+                       goto out;
+               }
 
                /*
                 * We hold a reference to 'inode' so it couldn't have been
@@ -981,7 +986,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
        }
        spin_unlock(&sb->s_inode_list_lock);
        iput(old_inode);
-
+out:
 #ifdef CONFIG_QUOTA_DEBUG
        if (reserved) {
                quota_error(sb, "Writes happened before quota was turned on "
@@ -989,6 +994,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        "Please run quotacheck(8)");
        }
 #endif
+       return err;
 }
 
 /*
@@ -2379,10 +2385,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);
 
-       add_dquot_ref(sb, type);
-
-       return 0;
+       error = add_dquot_ref(sb, type);
+       if (error)
+               dquot_disable(sb, type, flags);
 
+       return error;
 out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
@@ -2985,7 +2992,8 @@ static int __init dquot_init(void)
        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
 
-       register_shrinker(&dqcache_shrinker);
+       if (register_shrinker(&dqcache_shrinker))
+               panic("Cannot register dquot shrinker");
 
        return 0;
 }
index 020c9cacbb2f30b93d67cfba383c21a9952b7e5c..1fc934d244592e2df6ee902e8606f39cd313055d 100644 (file)
@@ -2591,7 +2591,6 @@ out:
                return err;
        if (inode->i_size < off + len - towrite)
                i_size_write(inode, off + len - towrite);
-       inode->i_version++;
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
        return len - towrite;
index 08df809e231521eace21df09737d6bb4fff99659..1210f684d3c28f9af8d8403c1f0222ef06dc380b 100644 (file)
@@ -5662,7 +5662,8 @@ xfs_bmap_collapse_extents(
                *done = true;
                goto del_cursor;
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        new_startoff = got.br_startoff - offset_shift_fsb;
        if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
@@ -5767,7 +5768,8 @@ xfs_bmap_insert_extents(
                        goto del_cursor;
                }
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        if (stop_fsb >= got.br_startoff + got.br_blockcount) {
                error = -EIO;
index 637b7a892313de51ed28f584b2b2b17f8e32a40c..f120fb20452f493ecd6fd29bfa09732770f28015 100644 (file)
@@ -318,8 +318,20 @@ xfs_scrub_dinode(
 
        /* di_mode */
        mode = be16_to_cpu(dip->di_mode);
-       if (mode & ~(S_IALLUGO | S_IFMT))
+       switch (mode & S_IFMT) {
+       case S_IFLNK:
+       case S_IFREG:
+       case S_IFDIR:
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFIFO:
+       case S_IFSOCK:
+               /* mode is recognized */
+               break;
+       default:
                xfs_scrub_ino_set_corrupt(sc, ino, bp);
+               break;
+       }
 
        /* v1/v2 fields */
        switch (dip->di_version) {
index 8e58ba8429464d7b57691fb06caf85eaaf6b13f6..3d9037eceaf1b81c2848e056bdc8e9309bc4d87f 100644 (file)
@@ -107,7 +107,7 @@ xfs_scrub_quota_item(
        unsigned long long              rcount;
        xfs_ino_t                       fs_icount;
 
-       offset = id * qi->qi_dqperchunk;
+       offset = id / qi->qi_dqperchunk;
 
        /*
         * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
@@ -207,7 +207,7 @@ xfs_scrub_quota(
        xfs_dqid_t                      id = 0;
        uint                            dqtype;
        int                             nimaps;
-       int                             error;
+       int                             error = 0;
 
        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return -ENOENT;
index a3eeaba156c5ab8d7d34e6b4f217423c452b73dc..21e2d70884e18edc2c765584f201a8b04604837c 100644 (file)
@@ -399,7 +399,7 @@ xfs_map_blocks(
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);
 
-       if (offset + count > mp->m_super->s_maxbytes)
+       if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -896,13 +896,13 @@ xfs_writepage_map(
        struct writeback_control *wbc,
        struct inode            *inode,
        struct page             *page,
-       loff_t                  offset,
-       uint64_t              end_offset)
+       uint64_t                end_offset)
 {
        LIST_HEAD(submit_list);
        struct xfs_ioend        *ioend, *next;
        struct buffer_head      *bh, *head;
        ssize_t                 len = i_blocksize(inode);
+       uint64_t                offset;
        int                     error = 0;
        int                     count = 0;
        int                     uptodate = 1;
@@ -1146,7 +1146,7 @@ xfs_do_writepage(
                end_offset = offset;
        }
 
-       return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
+       return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
 
 redirty:
        redirty_page_for_writepage(wbc, page);
@@ -1265,7 +1265,7 @@ xfs_map_trim_size(
        if (mapping_size > size)
                mapping_size = size;
        if (offset < i_size_read(inode) &&
-           offset + mapping_size >= i_size_read(inode)) {
+           (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
                /* limit mapping to block that spans EOF */
                mapping_size = roundup_64(i_size_read(inode) - offset,
                                          i_blocksize(inode));
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
        lockmode = xfs_ilock_data_map_shared(ip);
 
        ASSERT(offset <= mp->m_super->s_maxbytes);
-       if (offset + size > mp->m_super->s_maxbytes)
+       if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
                size = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
index dd136f7275e4a2efede5a7612de243ddd1349abf..e5fb008d75e899aaa26948ae5044e55e59ca6f11 100644 (file)
@@ -389,7 +389,8 @@ xfs_bud_init(
 int
 xfs_bui_recover(
        struct xfs_mount                *mp,
-       struct xfs_bui_log_item         *buip)
+       struct xfs_bui_log_item         *buip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             error = 0;
        unsigned int                    bui_type;
@@ -404,9 +405,7 @@ xfs_bui_recover(
        xfs_exntst_t                    state;
        struct xfs_trans                *tp;
        struct xfs_inode                *ip = NULL;
-       struct xfs_defer_ops            dfops;
        struct xfs_bmbt_irec            irec;
-       xfs_fsblock_t                   firstfsb;
 
        ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
 
@@ -464,7 +463,6 @@ xfs_bui_recover(
 
        if (VFS_I(ip)->i_nlink == 0)
                xfs_iflags_set(ip, XFS_IRECOVERY);
-       xfs_defer_init(&dfops, &firstfsb);
 
        /* Process deferred bmap item. */
        state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
@@ -479,16 +477,16 @@ xfs_bui_recover(
                break;
        default:
                error = -EFSCORRUPTED;
-               goto err_dfops;
+               goto err_inode;
        }
        xfs_trans_ijoin(tp, ip, 0);
 
        count = bmap->me_len;
-       error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
+       error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
                        ip, whichfork, bmap->me_startoff,
                        bmap->me_startblock, &count, state);
        if (error)
-               goto err_dfops;
+               goto err_inode;
 
        if (count > 0) {
                ASSERT(type == XFS_BMAP_UNMAP);
@@ -496,16 +494,11 @@ xfs_bui_recover(
                irec.br_blockcount = count;
                irec.br_startoff = bmap->me_startoff;
                irec.br_state = state;
-               error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
+               error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
                if (error)
-                       goto err_dfops;
+                       goto err_inode;
        }
 
-       /* Finish transaction, free inodes. */
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto err_dfops;
-
        set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -513,8 +506,6 @@ xfs_bui_recover(
 
        return error;
 
-err_dfops:
-       xfs_defer_cancel(&dfops);
 err_inode:
        xfs_trans_cancel(tp);
        if (ip) {
index c867daae4a3ce54c97055e133b478dd3baf6db61..24b354a2c83641487acfeb76a336a6476b9b98b3 100644 (file)
@@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
                struct xfs_bui_log_item *);
 void xfs_bui_item_free(struct xfs_bui_log_item *);
 void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
+int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_BMAP_ITEM_H__ */
index 4db6e8d780f6962475348a8dd318185a011250e8..4c6e86d861fda1a452dbeb20c1d3d64018627a20 100644 (file)
@@ -1815,22 +1815,27 @@ xfs_alloc_buftarg(
        btp->bt_daxdev = dax_dev;
 
        if (xfs_setsize_buftarg_early(btp, bdev))
-               goto error;
+               goto error_free;
 
        if (list_lru_init(&btp->bt_lru))
-               goto error;
+               goto error_free;
 
        if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
-               goto error;
+               goto error_lru;
 
        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
        btp->bt_shrinker.seeks = DEFAULT_SEEKS;
        btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
-       register_shrinker(&btp->bt_shrinker);
+       if (register_shrinker(&btp->bt_shrinker))
+               goto error_pcpu;
        return btp;
 
-error:
+error_pcpu:
+       percpu_counter_destroy(&btp->bt_io_count);
+error_lru:
+       list_lru_destroy(&btp->bt_lru);
+error_free:
        kmem_free(btp);
        return NULL;
 }
index d57c2db64e59385450b273757d7fe9798205bd5f..f248708c10ff7f64e61fb3cde307f42031322424 100644 (file)
@@ -970,14 +970,22 @@ xfs_qm_dqflush_done(
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
-           lip->li_lsn == qip->qli_flush_lsn) {
+           ((lip->li_lsn == qip->qli_flush_lsn) ||
+            (lip->li_flags & XFS_LI_FAILED))) {
 
                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
-               if (lip->li_lsn == qip->qli_flush_lsn)
+               if (lip->li_lsn == qip->qli_flush_lsn) {
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
-               else
+               } else {
+                       /*
+                        * Clear the failed state since we are about to drop the
+                        * flush lock
+                        */
+                       if (lip->li_flags & XFS_LI_FAILED)
+                               xfs_clear_li_failed(lip);
                        spin_unlock(&ailp->xa_lock);
+               }
        }
 
        /*
index 2c7a1629e064b4fd1f647affc3432797d058ae26..664dea105e76fee564a1feeb16a05387fe6b9000 100644 (file)
@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
        wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
+/*
+ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
+ * have been failed during writeback
+ *
+ * this informs the AIL that the dquot is already flush locked on the next push,
+ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
+ * dirty data makes it to disk.
+ */
+STATIC void
+xfs_dquot_item_error(
+       struct xfs_log_item     *lip,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dquot        *dqp;
+
+       dqp = DQUOT_ITEM(lip)->qli_dquot;
+       ASSERT(!completion_done(&dqp->q_flush));
+       xfs_set_li_failed(lip, bp);
+}
+
 STATIC uint
 xfs_qm_dquot_logitem_push(
        struct xfs_log_item     *lip,
@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
                                              __acquires(&lip->li_ailp->xa_lock)
 {
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
-       struct xfs_buf          *bp = NULL;
+       struct xfs_buf          *bp = lip->li_buf;
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;
 
        if (atomic_read(&dqp->q_pincount) > 0)
                return XFS_ITEM_PINNED;
 
+       /*
+        * The buffer containing this item failed to be written back
+        * previously. Resubmit the buffer for IO
+        */
+       if (lip->li_flags & XFS_LI_FAILED) {
+               if (!xfs_buf_trylock(bp))
+                       return XFS_ITEM_LOCKED;
+
+               if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+                       rval = XFS_ITEM_FLUSHING;
+
+               xfs_buf_unlock(bp);
+               return rval;
+       }
+
        if (!xfs_dqlock_nowait(dqp))
                return XFS_ITEM_LOCKED;
 
@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_unlock     = xfs_qm_dquot_logitem_unlock,
        .iop_committed  = xfs_qm_dquot_logitem_committed,
        .iop_push       = xfs_qm_dquot_logitem_push,
-       .iop_committing = xfs_qm_dquot_logitem_committing
+       .iop_committing = xfs_qm_dquot_logitem_committing,
+       .iop_error      = xfs_dquot_item_error
 };
 
 /*
index 61d1cb7dc10d25dd894624d6094973b9c25328a6..8012741266488ab4e0724b68aadb2742c1d29c2c 100644 (file)
@@ -2400,6 +2400,24 @@ retry:
        return 0;
 }
 
+/*
+ * Free any local-format buffers sitting around before we reset to
+ * extents format.
+ */
+static inline void
+xfs_ifree_local_data(
+       struct xfs_inode        *ip,
+       int                     whichfork)
+{
+       struct xfs_ifork        *ifp;
+
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+               return;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+}
+
 /*
  * This is called to return an inode to the inode free list.
  * The inode should already be truncated to 0 length and have
@@ -2437,6 +2455,9 @@ xfs_ifree(
        if (error)
                return error;
 
+       xfs_ifree_local_data(ip, XFS_DATA_FORK);
+       xfs_ifree_local_data(ip, XFS_ATTR_FORK);
+
        VFS_I(ip)->i_mode = 0;          /* mark incore inode as free */
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
index 87b1c331f9ebfb7cefb708adc47b55890bf7ab9d..28d1abfe835eef3e9d87f7da1c7c805fef0488f4 100644 (file)
@@ -24,6 +24,7 @@
 #include "xfs_bit.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_defer.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
 #include "xfs_inode.h"
@@ -4716,7 +4717,8 @@ STATIC int
 xlog_recover_process_cui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_cui_log_item         *cuip;
        int                             error;
@@ -4729,7 +4731,7 @@ xlog_recover_process_cui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_cui_recover(mp, cuip);
+       error = xfs_cui_recover(mp, cuip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4756,7 +4758,8 @@ STATIC int
 xlog_recover_process_bui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_bui_log_item         *buip;
        int                             error;
@@ -4769,7 +4772,7 @@ xlog_recover_process_bui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_bui_recover(mp, buip);
+       error = xfs_bui_recover(mp, buip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4805,6 +4808,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
        }
 }
 
+/* Take all the collected deferred ops and finish them in order. */
+static int
+xlog_finish_defer_ops(
+       struct xfs_mount        *mp,
+       struct xfs_defer_ops    *dfops)
+{
+       struct xfs_trans        *tp;
+       int64_t                 freeblks;
+       uint                    resblks;
+       int                     error;
+
+       /*
+        * We're finishing the defer_ops that accumulated as a result of
+        * recovering unfinished intent items during log recovery.  We
+        * reserve an itruncate transaction because it is the largest
+        * permanent transaction type.  Since we're the only user of the fs
+        * right now, take 93% (15/16) of the available free blocks.  Use
+        * weird math to avoid a 64-bit division.
+        */
+       freeblks = percpu_counter_sum(&mp->m_fdblocks);
+       if (freeblks <= 0)
+               return -ENOSPC;
+       resblks = min_t(int64_t, UINT_MAX, freeblks);
+       resblks = (resblks * 15) >> 4;
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
+                       0, XFS_TRANS_RESERVE, &tp);
+       if (error)
+               return error;
+
+       error = xfs_defer_finish(&tp, dfops);
+       if (error)
+               goto out_cancel;
+
+       return xfs_trans_commit(tp);
+
+out_cancel:
+       xfs_trans_cancel(tp);
+       return error;
+}
+
 /*
  * When this is called, all of the log intent items which did not have
  * corresponding log done items should be in the AIL.  What we do now
@@ -4825,10 +4868,12 @@ STATIC int
 xlog_recover_process_intents(
        struct xlog             *log)
 {
-       struct xfs_log_item     *lip;
-       int                     error = 0;
+       struct xfs_defer_ops    dfops;
        struct xfs_ail_cursor   cur;
+       struct xfs_log_item     *lip;
        struct xfs_ail          *ailp;
+       xfs_fsblock_t           firstfsb;
+       int                     error = 0;
 #if defined(DEBUG) || defined(XFS_WARN)
        xfs_lsn_t               last_lsn;
 #endif
@@ -4839,6 +4884,7 @@ xlog_recover_process_intents(
 #if defined(DEBUG) || defined(XFS_WARN)
        last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
 #endif
+       xfs_defer_init(&dfops, &firstfsb);
        while (lip != NULL) {
                /*
                 * We're done when we see something other than an intent.
@@ -4859,6 +4905,12 @@ xlog_recover_process_intents(
                 */
                ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
 
+               /*
+                * NOTE: If your intent processing routine can create more
+                * deferred ops, you /must/ attach them to the dfops in this
+                * routine or else those subsequent intents will get
+                * replayed in the wrong order!
+                */
                switch (lip->li_type) {
                case XFS_LI_EFI:
                        error = xlog_recover_process_efi(log->l_mp, ailp, lip);
@@ -4867,10 +4919,12 @@ xlog_recover_process_intents(
                        error = xlog_recover_process_rui(log->l_mp, ailp, lip);
                        break;
                case XFS_LI_CUI:
-                       error = xlog_recover_process_cui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_cui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                case XFS_LI_BUI:
-                       error = xlog_recover_process_bui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_bui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                }
                if (error)
@@ -4880,6 +4934,11 @@ xlog_recover_process_intents(
 out:
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->xa_lock);
+       if (error)
+               xfs_defer_cancel(&dfops);
+       else
+               error = xlog_finish_defer_ops(log->l_mp, &dfops);
+
        return error;
 }
 
index 8f2e2fac4255d63fc1dab2d268433e099aaf5aa8..3a55d6fc271b1e6d50aa6ce96c9e84b7c5886e43 100644 (file)
@@ -393,7 +393,8 @@ xfs_cud_init(
 int
 xfs_cui_recover(
        struct xfs_mount                *mp,
-       struct xfs_cui_log_item         *cuip)
+       struct xfs_cui_log_item         *cuip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             i;
        int                             error = 0;
@@ -405,11 +406,9 @@ xfs_cui_recover(
        struct xfs_trans                *tp;
        struct xfs_btree_cur            *rcur = NULL;
        enum xfs_refcount_intent_type   type;
-       xfs_fsblock_t                   firstfsb;
        xfs_fsblock_t                   new_fsb;
        xfs_extlen_t                    new_len;
        struct xfs_bmbt_irec            irec;
-       struct xfs_defer_ops            dfops;
        bool                            requeue_only = false;
 
        ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -465,7 +464,6 @@ xfs_cui_recover(
                return error;
        cudp = xfs_trans_get_cud(tp, cuip);
 
-       xfs_defer_init(&dfops, &firstfsb);
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
@@ -485,7 +483,7 @@ xfs_cui_recover(
                        new_len = refc->pe_len;
                } else
                        error = xfs_trans_log_finish_refcount_update(tp, cudp,
-                               &dfops, type, refc->pe_startblock, refc->pe_len,
+                               dfops, type, refc->pe_startblock, refc->pe_len,
                                &new_fsb, &new_len, &rcur);
                if (error)
                        goto abort_error;
@@ -497,21 +495,21 @@ xfs_cui_recover(
                        switch (type) {
                        case XFS_REFCOUNT_INCREASE:
                                error = xfs_refcount_increase_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_DECREASE:
                                error = xfs_refcount_decrease_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_ALLOC_COW:
                                error = xfs_refcount_alloc_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        case XFS_REFCOUNT_FREE_COW:
                                error = xfs_refcount_free_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
@@ -525,17 +523,12 @@ xfs_cui_recover(
        }
 
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto abort_defer;
        set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
        error = xfs_trans_commit(tp);
        return error;
 
 abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-abort_defer:
-       xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        return error;
 }
index 5b74dddfa64be728f74e7dd1f731983c1523ad1a..0e5327349a13ee5921808ed866ac02acc038d0df 100644 (file)
@@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
                struct xfs_cui_log_item *);
 void xfs_cui_item_free(struct xfs_cui_log_item *);
 void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
+int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_REFCOUNT_ITEM_H__ */
index f849be28e0826683a496b7aaedf1f4c76749a95f..79287629c888dd735547e9c727e2d24ed523b241 100644 (file)
@@ -105,6 +105,7 @@ enum acpi_bus_device_type {
        ACPI_BUS_TYPE_THERMAL,
        ACPI_BUS_TYPE_POWER_BUTTON,
        ACPI_BUS_TYPE_SLEEP_BUTTON,
+       ACPI_BUS_TYPE_ECDT_EC,
        ACPI_BUS_DEVICE_TYPE_COUNT
 };
 
index 29c691265b49357bc0d036b71897348806c58e6c..14499757338f65416835330254b8c90a06918d64 100644 (file)
@@ -58,6 +58,7 @@
 #define ACPI_VIDEO_HID                 "LNXVIDEO"
 #define ACPI_BAY_HID                   "LNXIOBAY"
 #define ACPI_DOCK_HID                  "LNXDOCK"
+#define ACPI_ECDT_HID                  "LNXEC"
 /* Quirk for broken IBM BIOSes */
 #define ACPI_SMBUS_IBM_HID             "SMBUSIBM"
 
index 757dc6ffc7ba5f294bae554af3e6d1a01c1207e5..b234d54f2cb6e4c23a21db2af3b225264eccae2a 100644 (file)
@@ -805,15 +805,23 @@ static inline int pmd_trans_huge(pmd_t pmd)
 {
        return 0;
 }
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        BUG();
        return 0;
 }
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+       BUG();
+       return 0;
+}
+#endif /* pud_write */
+
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
index 38a2b4770c35f0d440867851a3424d706588d989..593811362a9172fd49fa14f62567af2ee636acc6 100644 (file)
@@ -58,12 +58,21 @@ int ttm_pool_populate(struct ttm_tt *ttm);
  */
 void ttm_pool_unpopulate(struct ttm_tt *ttm);
 
+/**
+ * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
 /**
  * Output the state of pools to debugfs file
  */
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
-
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
@@ -83,17 +92,6 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
                                          unsigned max_pages)
@@ -116,16 +114,6 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
                                      struct device *dev)
 {
 }
-
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-       return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
 #endif
 
 #endif
diff --git a/include/lib/libgcc.h b/include/lib/libgcc.h
deleted file mode 100644 (file)
index 32e1e0f..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * include/lib/libgcc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.
- */
-
-#ifndef __LIB_LIBGCC_H
-#define __LIB_LIBGCC_H
-
-#include <asm/byteorder.h>
-
-typedef int word_type __attribute__ ((mode (__word__)));
-
-#ifdef __BIG_ENDIAN
-struct DWstruct {
-       int high, low;
-};
-#elif defined(__LITTLE_ENDIAN)
-struct DWstruct {
-       int low, high;
-};
-#else
-#error I feel sick.
-#endif
-
-typedef union {
-       struct DWstruct s;
-       long long ll;
-} DWunion;
-
-#endif /* __ASM_LIBGCC_H */
index bbd92da0946e1c20ede60bc27780844dfbde8c26..511fbaabf6248b67220c16653e491f74e3f046e7 100644 (file)
@@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
 static inline int vfs_fstatat(int dfd, const char __user *filename,
                              struct kstat *stat, int flags)
 {
-       return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+       return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+                        stat, STATX_BASIC_STATS);
 }
 static inline int vfs_fstat(int fd, struct kstat *stat)
 {
@@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
        return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
 }
 
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+       struct inode *inode;
+
+       if (!vma->vm_file)
+               return false;
+       if (!vma_is_dax(vma))
+               return false;
+       inode = file_inode(vma->vm_file);
+       if (inode->i_mode == S_IFCHR)
+               return false; /* device-dax */
+       return true;
+}
+
 static inline int iocb_flags(struct file *file)
 {
        int res = 0;
index fbf5b31d47eea91925b9275b9f7fa2784cd5fe56..82a25880714ac69860322edc3e69b4a81b83fb62 100644 (file)
@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
 }
 #endif
 
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
-       BUG();
-       return 0;
-}
-#endif
-
 #define HUGETLB_ANON_FILE "anon_hugepage"
 
 enum {
index 2e754b7c282c8324778b60e7ea57940d9f72c22d..893d6d606cd0a9023e2ab8ef27c523ba8a959a06 100644 (file)
@@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
new file mode 100644 (file)
index 0000000..32e1e0f
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * include/lib/libgcc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#ifndef __LIB_LIBGCC_H
+#define __LIB_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+       int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+       int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+       struct DWstruct s;
+       long long ll;
+} DWunion;
+
+#endif /* __ASM_LIBGCC_H */
index 895ec0c4942e68c43ca49f1fb77f5112344ca05a..a2246cf670badb96e6c11c4d13b233db0c93388f 100644 (file)
@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
        new_page = __alloc_pages_nodemask(gfp_mask, order,
                                preferred_nid, nodemask);
 
-       if (new_page && PageTransHuge(page))
+       if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);
 
        return new_page;
index ee073146aaa7c0085d4e212726be5d60ee317e5a..ea818ff739cdfbb433fc10634ed5ac77eacbc5b7 100644 (file)
@@ -377,6 +377,7 @@ enum page_entry_size {
 struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
+       int (*split)(struct vm_area_struct * area, unsigned long addr);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_fault *vmf);
        int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
@@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+                           unsigned int gup_flags, struct page **pages,
+                           struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+               unsigned long nr_pages, unsigned int gup_flags,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
 
index 2c9c87d8a0c18e5f5c1cf2a8e148504e4f3ad3a9..7546822a1d74f198d5c521bc2864c369e38d70e4 100644 (file)
@@ -15,6 +15,7 @@
 #define _LINUX_PERF_EVENT_H
 
 #include <uapi/linux/perf_event.h>
+#include <uapi/linux/bpf_perf_event.h>
 
 /*
  * Kernel-internal data types and definitions:
@@ -787,7 +788,7 @@ struct perf_output_handle {
 };
 
 struct bpf_perf_event_data_kern {
-       struct pt_regs *regs;
+       bpf_user_pt_regs_t *regs;
        struct perf_sample_data *data;
        struct perf_event *event;
 };
@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
 # define perf_instruction_pointer(regs)        instruction_pointer(regs)
 #endif
+#ifndef perf_arch_bpf_user_pt_regs
+# define perf_arch_bpf_user_pt_regs(regs) regs
+#endif
 
 static inline bool has_branch_stack(struct perf_event *event)
 {
index a328e8181e49f3a0947dd713daeef35b9d7c831f..e4b257ff881bfe439a945d7487f5700f17a26740 100644 (file)
@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
                first->pprev = &n->next;
 }
 
-/**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-                                       struct hlist_nulls_head *h)
-{
-       struct hlist_nulls_node *i, *last = NULL;
-
-       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-            i = hlist_nulls_next_rcu(i))
-               last = i;
-
-       if (last) {
-               n->next = last->next;
-               n->pprev = &last->next;
-               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-       } else {
-               hlist_nulls_add_head_rcu(n, h);
-       }
-}
-
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
index df5d97a85e1a6674e76dcc51ef54a250cbbdb539..ca4a6361389b8a3b268ca5b0f4778662a1f7d315 100644 (file)
@@ -224,7 +224,8 @@ struct tcp_sock {
                rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
                fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
                fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
-               unused:3;
+               is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
+               unused:2;
        u8      nonagle     : 4,/* Disable Nagle algorithm?             */
                thin_lto    : 1,/* Use linear timeouts for thin streams */
                unused1     : 1,
index a69877734c4eb033b7a7a7f2989e1da241793782..e2ec3582e54937d3818afed3e253440fc23541a0 100644 (file)
@@ -82,6 +82,7 @@ struct usbnet {
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
+#              define EVENT_NO_IP_ALIGN        13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 9a9347710701458a74953ef0407714865a13298a..9665582c4687e41bf5dd081894c0be89a40b89b6 100644 (file)
@@ -168,6 +168,17 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+{
+       if (fls(qth_min) + Wlog > 32)
+               return false;
+       if (fls(qth_max) + Wlog > 32)
+               return false;
+       if (qth_max < qth_min)
+               return false;
+       return true;
+}
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
                                 u8 Scell_log, u8 *stab, u32 max_P)
@@ -179,7 +190,7 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       if (delta < 0)
+       if (delta <= 0)
                delta = 1;
        p->qth_delta    = delta;
        if (!max_P) {
index 16f949eef52fdfd7c90fa15b44093334d1355aaf..2f8f93da5dc2660f4db37c04f8a434809b3120a1 100644 (file)
@@ -503,7 +503,8 @@ struct sctp_datamsg {
        /* Did the messenge fail to send? */
        int send_error;
        u8 send_failed:1,
-          can_delay;       /* should this message be Nagle delayed */
+          can_delay:1, /* should this message be Nagle delayed */
+          abandoned:1; /* should this message be abandoned */
 };
 
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
index 79e1a2c7912c03d8281d449609d57cc909138a3b..9155da42269208b358df8535b14dfd3dba509365 100644 (file)
@@ -685,11 +685,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-           sk->sk_family == AF_INET6)
-               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-       else
-               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
index 4e09398009c10a72478b43d3cffc24ba01612b91..6da880d2f022c0cfd787a62af2bb7d222348af32 100644 (file)
@@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 }
 #endif
 
-/* TCP_SKB_CB reference means this can not be used from early demux */
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-           skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
 #endif
        return false;
@@ -1056,7 +1055,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs);
+                 bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
 /* These functions determine how the current flow behaves in respect of SACK
index 4cd0f05d01134d1a1e2d5bd231407bfd7d92d250..8989a92c571a2d7036b74b233913b588e4e4248c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/tracepoint.h>
+#include <linux/bpf.h>
 
 #define __XDP_ACT_MAP(FN)      \
        FN(ABORTED)             \
diff --git a/include/uapi/asm-generic/bpf_perf_event.h b/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 90fc490f973f9eb1c8a17d8c4eced4f12452de9e..821f71a2e48fa67b8ef039b891fff1098c066de5 100644 (file)
@@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN,                    0,  8)
 
 #define PTR_CHECK_DEV                  ((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)                                          \
+#define MAKE_PTR(gen, offset, dev)                                     \
        ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
index af549d4ecf1b6e76522c6a157db98d48e4190b30..8f95303f9d807d10d4fd6850d91a2486b0a490ec 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
index 731d0df722e3a0422edc8dee848393b4785a9a40..6e80501368aee68e77f800349c25d53adb56b7c6 100644 (file)
@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args {
 };
 
 struct kfd_ioctl_set_scratch_backing_va_args {
-       uint64_t va_addr;       /* to KFD */
-       uint32_t gpu_id;        /* to KFD */
-       uint32_t pad;
+       __u64 va_addr;  /* to KFD */
+       __u32 gpu_id;   /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
-       uint64_t tile_config_ptr;
+       __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
-       uint64_t macro_tile_config_ptr;
+       __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_tile_configs;
+       __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_macro_tile_configs;
+       __u32 num_macro_tile_configs;
 
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t gb_addr_config;        /* from KFD */
-       uint32_t num_banks;             /* from KFD */
-       uint32_t num_ranks;             /* from KFD */
+       __u32 gpu_id;           /* to KFD */
+       __u32 gb_addr_config;   /* from KFD */
+       __u32 num_banks;                /* from KFD */
+       __u32 num_ranks;                /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
index b9f8686a84cf1a5ee9d2b92d21579af11d8690aa..86b50aa26ee80adac9ba7ac52248c64cbea19b26 100644 (file)
@@ -1447,7 +1447,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
        for (; *prog; prog++)
-               cnt++;
+               if (*prog != &dummy_bpf_prog.prog)
+                       cnt++;
        rcu_read_unlock();
        return cnt;
 }
index 68ec884440b75da08824249db74bb992f6d938ce..8455b89d1bbf698f86c44bd1e1846b4c7d4ba60f 100644 (file)
@@ -1,3 +1,18 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
index 16beab4767e1e686e8ccd3642a82cbc4adde7f59..ba957b9812b3cf430c6b4f4d6e220f9a264f5d74 100644 (file)
@@ -7987,11 +7987,11 @@ static void bpf_overflow_handler(struct perf_event *event,
 {
        struct bpf_perf_event_data_kern ctx = {
                .data = data,
-               .regs = regs,
                .event = event,
        };
        int ret = 0;
 
+       ctx.regs = perf_arch_bpf_user_pt_regs(regs);
        preempt_disable();
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
                goto out;
index 206e0e2ace53d18b77437d45d8baef24bce97187..987d9a9ae2839a2daefdd5c2d24ffe9c6d440bde 100644 (file)
@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                return ret;
 
        if (copy_to_user(arg, &buts, sizeof(buts))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
        return 0;
@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
                return ret;
 
        if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
 
@@ -872,7 +872,7 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                             u32 what, int error, union kernfs_node_id *cgid)
+                             u32 what, int error)
 {
        struct blk_trace *bt = q->blk_trace;
 
@@ -880,22 +880,21 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                return;
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+                       bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+                       blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
                                       struct request_queue *q, struct bio *bio,
                                       int error)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request *rq,
                                        struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-                        blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@ static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request *rq,
                                         struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@ static void blk_add_trace_getrq(void *ignore,
                                struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
@@ -945,8 +940,7 @@ static void blk_add_trace_sleeprq(void *ignore,
                                  struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
index 27d1f4ffa3def946525b2d248757fac3620504e5..0ce99c379c3089a4857d082b64ede99feeea5282 100644 (file)
@@ -759,6 +759,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
 
 static DEFINE_MUTEX(bpf_event_mutex);
 
+#define BPF_TRACE_MAX_PROGS 64
+
 int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
 {
@@ -772,6 +774,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
                goto unlock;
 
        old_array = event->tp_event->prog_array;
+       if (old_array &&
+           bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
+               ret = -E2BIG;
+               goto unlock;
+       }
+
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;
index 1b6087db95a54b665262ec8ee142a3a323850578..3ffc46e3bb6c84cee02bcb038b7739a52cbe8761 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashldi3(long long u, word_type b)
 {
index 2e67c97ac65a98737d054898884a0b944c3cd6de..ea054550f0e800897652b1415d9baa2b356d376d 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashrdi3(long long u, word_type b)
 {
index 6d7ebf6c2b862f32b9dd391c8f456769472ec62d..2250da7e503ebaebbdd86a967eb2b6c36560d3f6 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type notrace __cmpdi2(long long a, long long b)
 {
index 8e845f4bb65f48eaec6ae720fb2515d1e8de52b8..99cfa5721f2d2e0f042a214ccd40a7d63261d01c 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __lshrdi3(long long u, word_type b)
 {
index 88938543e10a626f183fcea3e9127af286967daf..54c8b3123376bc0a17c3dec743fd9c42a8231fab 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/export.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 #define W_TYPE_SIZE 32
 
index 8bf78b4b78f0a286e40941344ecd4b504738cc01..dfa55c873c1318643fdbcbe916b9c18a54edc4c9 100644 (file)
 #include <linux/types.h>
 #include <net/netlink.h>
 
-/* for these data types attribute length must be exactly given size */
+/* For these data types, attribute length should be exactly the given
+ * size. However, to maintain compatibility with broken commands, if the
+ * attribute length does not match the expected size a warning is emitted
+ * to the user that the command is sending invalid data and needs to be fixed.
+ */
 static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
        [NLA_U8]        = sizeof(u8),
        [NLA_U16]       = sizeof(u16),
@@ -28,8 +32,16 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 };
 
 static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+       [NLA_U8]        = sizeof(u8),
+       [NLA_U16]       = sizeof(u16),
+       [NLA_U32]       = sizeof(u32),
+       [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla_bitfield32(const struct nlattr *nla,
@@ -69,11 +81,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
        BUG_ON(pt->type > NLA_TYPE_MAX);
 
-       /* for data types NLA_U* and NLA_S* require exact length */
-       if (nla_attr_len[pt->type]) {
-               if (attrlen != nla_attr_len[pt->type])
-                       return -ERANGE;
-               return 0;
+       if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+               pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+                                   current->comm, type);
        }
 
        switch (pt->type) {
index 49a53505c8e3527959f0586de21ea6b774fc7fad..25ca2d4c1e19118b2c34bdee66ab1a64ed54fe3d 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type __ucmpdi2(unsigned long long a, unsigned long long b)
 {
index 74b52dfd5852da3e211ae949ce7d31133361c14b..84b2dc76f140e922e2ed0d7c4d545b4d4ddf496d 100644 (file)
@@ -113,11 +113,23 @@ static const struct file_operations bdi_debug_stats_fops = {
        .release        = single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+       if (!bdi_debug_root)
+               return -ENOMEM;
+
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+       if (!bdi->debug_dir)
+               return -ENOMEM;
+
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
+       if (!bdi->debug_stats) {
+               debugfs_remove(bdi->debug_dir);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
@@ -129,9 +141,10 @@ static void bdi_debug_unregister(struct backing_dev_info *bdi)
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
 {
+       return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
@@ -869,10 +882,13 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
        if (IS_ERR(dev))
                return PTR_ERR(dev);
 
+       if (bdi_debug_register(bdi, dev_name(dev))) {
+               device_destroy(bdi_class, dev->devt);
+               return -ENOMEM;
+       }
        cgwb_bdi_register(bdi);
        bdi->dev = dev;
 
-       bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);
 
        spin_lock_bh(&bdi_lock);
index 2f98df0d460eef41f80586544ad98abb66fae60c..297c7238f7d4094a6ac4ab0dc72e04abb870972f 100644 (file)
@@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
                ret = -EFAULT;
                goto out;
        }
+
+       /*
+        * While get_vaddr_frames() could be used for transient (kernel
+        * controlled lifetime) pinning of memory pages all current
+        * users establish long term (userspace controlled lifetime)
+        * page pinning. Treat get_vaddr_frames() like
+        * get_user_pages_longterm() and disallow it for filesystem-dax
+        * mappings.
+        */
+       if (vma_is_fsdax(vma))
+               return -EOPNOTSUPP;
+
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                vec->got_ref = true;
                vec->is_pfns = false;
index dfcde13f289a76ddcb54919f900467aeab15609d..d3fb60e5bfacd4c733957dc526c28c41bd2321d1 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  */
 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 {
-       return pte_write(pte) ||
+       return pte_access_permitted(pte, WRITE) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 }
 
@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas_arg)
+{
+       struct vm_area_struct **vmas = vmas_arg;
+       struct vm_area_struct *vma_prev = NULL;
+       long rc, i;
+
+       if (!pages)
+               return -EINVAL;
+
+       if (!vmas) {
+               vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+                              GFP_KERNEL);
+               if (!vmas)
+                       return -ENOMEM;
+       }
+
+       rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+       for (i = 0; i < rc; i++) {
+               struct vm_area_struct *vma = vmas[i];
+
+               if (vma == vma_prev)
+                       continue;
+
+               vma_prev = vma;
+
+               if (vma_is_fsdax(vma))
+                       break;
+       }
+
+       /*
+        * Either get_user_pages() failed, or the vma validation
+        * succeeded, in either case we don't need to put_page() before
+        * returning.
+        */
+       if (i >= rc)
+               goto out;
+
+       for (i = 0; i < rc; i++)
+               put_page(pages[i]);
+       rc = -EOPNOTSUPP;
+out:
+       if (vmas != vmas_arg)
+               kfree(vmas);
+       return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
  * @vma:   target vma
index ea19742a5d60b1a6270629a024d88a13b9c5f3c1..3a5c172af56039bb26007ea4cc5ec4ca0a9bf659 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
                if (pmd_protnone(pmd))
                        return hmm_vma_walk_clear(start, end, walk);
 
-               if (write_fault && !pmd_write(pmd))
+               if (!pmd_access_permitted(pmd, write_fault))
                        return hmm_vma_walk_clear(start, end, walk);
 
                pfn = pmd_pfn(pmd) + pte_index(addr);
-               flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
+               flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
                for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                        pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
                return 0;
@@ -456,11 +456,11 @@ again:
                        continue;
                }
 
-               if (write_fault && !pte_write(pte))
+               if (!pte_access_permitted(pte, write_fault))
                        goto fault;
 
                pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
-               pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
+               pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0;
                continue;
 
 fault:
index 0e7ded98d114d184877d2fc9bd0f02c3187f2ed5..2f2f5e77490278f58c6e9a923899255efff77551 100644 (file)
@@ -870,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
         */
        WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
                return NULL;
 
        if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1012,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pud_lockptr(mm, pud));
 
-       if (flags & FOLL_WRITE && !pud_write(*pud))
+       if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
                return NULL;
 
        if (pud_present(*pud) && pud_devmap(*pud))
@@ -1386,7 +1386,7 @@ out_unlock:
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-       return pmd_write(pmd) ||
+       return pmd_access_permitted(pmd, WRITE) ||
               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
index 681b300185c0c0383bb240d6a898849bf777f46b..9a334f5fb730873190a57648bc0f040f91ac0ed6 100644 (file)
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        }
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (addr & ~(huge_page_mask(hstate_vma(vma))))
+               return -EINVAL;
+       return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
+       .split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4627,7 +4635,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       p4d = p4d_offset(pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
index e4738d5e9b8c5214c106756b311e102eaf2cdad1..3d4781756d50fef924f52c0d9cb6cb0cbddd479f 100644 (file)
@@ -1523,6 +1523,8 @@ static void kmemleak_scan(void)
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
+                       if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+                               cond_resched();
                }
        }
        put_online_mems();
index 375cf32087e4a2da0c42b251a1d5538ffaa1c857..751e97aa22106f9be73919033271ad9f98498fca 100644 (file)
@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
 {
        struct file *file = vma->vm_file;
 
+       *prev = vma;
 #ifdef CONFIG_SWAP
        if (!file) {
-               *prev = vma;
                force_swapin_readahead(vma, start, end);
                return 0;
        }
 
        if (shmem_mapping(file->f_mapping)) {
-               *prev = vma;
                force_shm_swapin_readahead(vma, start, end,
                                        file->f_mapping);
                return 0;
@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
                return 0;
        }
 
-       *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
index 50e6906314f8d9c987181744c9a45b54872edfb6..ac2ffd5e02b914fb9564649c9475babc51119de6 100644 (file)
@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 
        if (!mem_cgroup_is_root(memcg))
-               css_put(&memcg->css);
+               css_put_many(&memcg->css, nr_entries);
 }
 
 /**
index 85e7a87da79fe4a5487e1f3f6216e61b9827515c..5eb3d2524bdc28239b33a0ac6e385fa5a5b9aaf9 100644 (file)
@@ -3948,7 +3948,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
        if (unlikely(!pte_same(*vmf->pte, entry)))
                goto unlock;
        if (vmf->flags & FAULT_FLAG_WRITE) {
-               if (!pte_write(entry))
+               if (!pte_access_permitted(entry, WRITE))
                        return do_wp_page(vmf);
                entry = pte_mkdirty(entry);
        }
@@ -4013,7 +4013,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
                        /* NUMA case for anonymous PUDs would go here */
 
-                       if (dirty && !pud_write(orig_pud)) {
+                       if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
                                ret = wp_huge_pud(&vmf, orig_pud);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
@@ -4046,7 +4046,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                        if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf, orig_pmd);
 
-                       if (dirty && !pmd_write(orig_pmd)) {
+                       if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
                                ret = wp_huge_pmd(&vmf, orig_pmd);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
@@ -4336,7 +4336,7 @@ int follow_phys(struct vm_area_struct *vma,
                goto out;
        pte = *ptep;
 
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
+       if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                goto unlock;
 
        *prot = pgprot_val(pte_pgprot(pte));
index 924839fac0e6421a77839825a99833a342d3153c..a4d5468212149db8a4cf20f9917c7bf48231a9ce 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,9 +2555,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err;
 
-       if (is_vm_hugetlb_page(vma) && (addr &
-                                       ~(huge_page_mask(hstate_vma(vma)))))
-               return -EINVAL;
+       if (vma->vm_ops && vma->vm_ops->split) {
+               err = vma->vm_ops->split(vma, addr);
+               if (err)
+                       return err;
+       }
 
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
index c86fbd1b590ecda69741d4c1d9a9c0875d98ee69..c957be32b27a9e7a17a6e33e69a31b1b6fa8e820 100644 (file)
@@ -550,7 +550,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         */
        set_bit(MMF_UNSTABLE, &mm->flags);
 
-       tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;
@@ -565,11 +564,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
+                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+               }
        }
-       tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
index e7095030aa1fafbf74d808a7de7943d4ae796593..586f31261c8328e30106254e09e52fa6e93f410e 100644 (file)
@@ -433,11 +433,8 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (unlikely(bg_thresh >= thresh)) {
-               pr_warn("vm direct limit must be set greater than background limit.\n");
+       if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
-       }
-
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
index d4096f4a5c1f75a276620b89cad5e2ed8878d0b0..73f5d4556b3d0b7218bea0cb9bb0fdd1f1cb3cdd 100644 (file)
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
        if (WARN_ON_ONCE(!mm_percpu_wq))
                return;
 
-       /* Workqueues cannot recurse */
-       if (current->flags & PF_WQ_WORKER)
-               return;
-
        /*
         * Do not drain if one is already in progress unless it's specific to
         * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -7656,11 +7652,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /*
         * In case of -EBUSY, we'd like to know which page causes problem.
-        * So, just fall through. We will check it in test_pages_isolated().
+        * So, just fall through. test_pages_isolated() has a tracepoint
+        * which will report the busy page.
+        *
+        * It is possible that busy pages could become available before
+        * the call to test_pages_isolated, and the range will actually be
+        * allocated.  So, if we fall through be sure to clear ret so that
+        * -EBUSY is not accidentally used or returned to caller.
         */
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret && ret != -EBUSY)
                goto done;
+       ret =0;
 
        /*
         * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
index 985046ae42312e86505d6fded2fb56501c38536b..80f5c79053a4d2eac267f555fd732b7c1687ba36 100644 (file)
@@ -839,7 +839,6 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
        if (IS_ERR(file)) {
                pr_err("%s (%d): failed to map fd\n",
                       __func__, task_pid_nr(current));
-               sock_release(csocket);
                kfree(p);
                return PTR_ERR(file);
        }
index 07ed21d64f92b39da9b683aa432efde6a14afdf0..f47e96b623088ae354947787ef17217cd32ae64e 100644 (file)
@@ -1106,7 +1106,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
-       return p ? -ENFILE : -EEXIST;
+       return -ENFILE;
 }
 
 static int dev_alloc_name_ns(struct net *net,
index abd07a443219853b022bef41cb072e90ff8f07f0..178bb9833311f83205317b07fe64cb2e45a9f734 100644 (file)
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index b68168fcc06aa1981258eca4857511329af62f9a..9d43c1f4027408f3a2176767da0dd425938ba652 100644 (file)
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
+       struct dccp_sock *dp = dccp_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;
 
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
                sk->sk_err = ECONNRESET;
 
        dccp_clear_xmit_timers(sk);
+       ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+       ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+       dp->dccps_hc_rx_ccid = NULL;
+       dp->dccps_hc_tx_ccid = NULL;
 
        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
index c690cd0d9b3f0af53c23b9a1ecc87be4098ae059..b563e0c46bac2362acccf38495546a8b6b726384 100644 (file)
@@ -93,7 +93,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
 }
 
 /*
- * Enter the time wait state.
+ * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
@@ -111,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
-       spin_lock_bh(&bhead->lock);
+       spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -137,7 +137,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       spin_unlock_bh(lock);
+       spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
index bf97317e6c974285a652664d02e8b2c48a8cfe96..f08eebe60446e2e99d19bd0ea51e5fafb96aca63 100644 (file)
@@ -2412,6 +2412,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_cwnd_cnt = 0;
        tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
        /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
index 69ee877574d08b36bc990f890899037108eafe05..8322f26e770e4406fe9accb386a99659941cc874 100644 (file)
@@ -110,7 +110,8 @@ struct bbr {
        u32     lt_last_lost;        /* LT intvl start: tp->lost */
        u32     pacing_gain:10, /* current gain for setting pacing rate */
                cwnd_gain:10,   /* current gain for setting cwnd */
-               full_bw_cnt:3,  /* number of rounds without large bw gains */
+               full_bw_reached:1,   /* reached full bw in Startup? */
+               full_bw_cnt:2,  /* number of rounds without large bw gains */
                cycle_idx:3,    /* current index in pacing_gain cycle array */
                has_seen_rtt:1, /* have we seen an RTT sample yet? */
                unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
 {
        const struct bbr *bbr = inet_csk_ca(sk);
 
-       return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+       return bbr->full_bw_reached;
 }
 
 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
                return;
        }
        ++bbr->full_bw_cnt;
+       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 }
 
 /* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
        bbr->restore_cwnd = 0;
        bbr->round_start = 0;
        bbr->idle_restart = 0;
+       bbr->full_bw_reached = 0;
        bbr->full_bw = 0;
        bbr->full_bw_cnt = 0;
        bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
  */
 static u32 bbr_undo_cwnd(struct sock *sk)
 {
+       struct bbr *bbr = inet_csk_ca(sk);
+
+       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
+       bbr->full_bw_cnt = 0;
+       bbr_reset_lt_bw_sampling(sk);
        return tcp_sk(sk)->snd_cwnd;
 }
 
index 734cfc8ff76edf3453921b50620be2986bfcfdb9..9550cc42de2d9ba4cca6d961a2a3bca501755a69 100644 (file)
@@ -579,6 +579,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
        int time;
        int copied;
 
+       tcp_mstamp_refresh(tp);
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
        if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
                return;
@@ -1941,6 +1942,8 @@ void tcp_enter_loss(struct sock *sk)
        if (is_reneg) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
                tp->sacked_out = 0;
+               /* Mark SACK reneging until we recover from this loss event. */
+               tp->is_sack_reneg = 1;
        }
        tcp_clear_all_retrans_hints(tp);
 
@@ -2326,6 +2329,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
        }
        tp->snd_cwnd_stamp = tcp_jiffies32;
        tp->undo_marker = 0;
+       tp->rack.advanced = 1; /* Force RACK to re-exam losses */
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2368,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                return true;
        }
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        return false;
 }
 
@@ -2397,8 +2402,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
                        NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
-               if (frto_undo || tcp_is_sack(tp))
+               if (frto_undo || tcp_is_sack(tp)) {
                        tcp_set_ca_state(sk, TCP_CA_Open);
+                       tp->is_sack_reneg = 0;
+               }
                return true;
        }
        return false;
@@ -3495,6 +3502,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        struct tcp_sacktag_state sack_state;
        struct rate_sample rs = { .prior_delivered = 0 };
        u32 prior_snd_una = tp->snd_una;
+       bool is_sack_reneg = tp->is_sack_reneg;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
@@ -3611,7 +3619,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
-       tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+       tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
        tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
        tcp_xmit_recovery(sk, rexmit);
        return 1;
index c6bc0c4d19c624888b0d0b5a4246c7183edf63f5..77ea45da0fe9c746907a312989658af3ad3b198d 100644 (file)
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+       memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+               sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+                          const struct tcphdr *th)
+{
+       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+        * barrier() makes sure compiler wont play fool^Waliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+               sizeof(struct inet_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff * 4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+       TCP_SKB_CB(skb)->sacked  = 0;
+       TCP_SKB_CB(skb)->has_rxtstamp =
+                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  *     From tcp_input.c
  */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-               sizeof(struct inet_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff * 4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-       TCP_SKB_CB(skb)->sacked  = 0;
-       TCP_SKB_CB(skb)->has_rxtstamp =
-                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
                               th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       iph = ip_hdr(skb);
+                       tcp_v4_fill_cb(skb, iph, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
+       tcp_v4_fill_cb(skb, iph, th);
 
        skb->dev = NULL;
 
@@ -1742,6 +1758,8 @@ no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
 csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
                goto discard_it;
        }
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
+                       tcp_v4_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
index e36eff0403f4e80c4f7291a70614f40125652133..b079b619b60ca577d5ef20a5065fce87acecd96c 100644 (file)
@@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index 3330a370d3061edd7cda90e1f50713ed0e7868a1..c61240e43923d6dd6a5d6215074e2da2c2bc71f4 100644 (file)
@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs)
+                 bool is_sack_reneg, struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 snd_us, ack_us;
@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 
        rs->acked_sacked = delivered;   /* freshly ACKed or SACKed */
        rs->losses = lost;              /* freshly marked lost */
-       /* Return an invalid sample if no timing information is available. */
-       if (!rs->prior_mstamp) {
+       /* Return an invalid sample if no timing information is available or
+        * in recovery from loss with SACK reneging. Rate samples taken during
+        * a SACK reneging event may overestimate bw by including packets that
+        * were SACKed before the reneg.
+        */
+       if (!rs->prior_mstamp || is_sack_reneg) {
                rs->delivered = -1;
                rs->interval_us = -1;
                return;
index d3ea89020c69c17189f6a5eefb28e92bd97ac2e1..3a81720ac0c40877386e37c99f4f321ab4127fa4 100644 (file)
@@ -55,7 +55,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
-       if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) {
+       if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
+           min_rtt != ~0U) {
                reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
                reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
        }
@@ -79,12 +80,12 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
                 */
                remaining = tp->rack.rtt_us + reo_wnd -
                            tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
-               if (remaining < 0) {
+               if (remaining <= 0) {
                        tcp_rack_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
-                       /* Record maximum wait time (+1 to avoid 0) */
-                       *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+                       /* Record maximum wait time */
+                       *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
 }
@@ -116,13 +117,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 {
        u32 rtt_us;
 
-       if (tp->rack.mstamp &&
-           !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
-                                end_seq, tp->rack.end_seq))
-               return;
-
        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
-       if (sacked & TCPCB_RETRANS) {
+       if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
@@ -133,13 +129,15 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
-               if (rtt_us < tcp_min_rtt(tp))
-                       return;
+               return;
        }
-       tp->rack.rtt_us = rtt_us;
-       tp->rack.mstamp = xmit_time;
-       tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
+       tp->rack.rtt_us = rtt_us;
+       if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
+                               end_seq, tp->rack.end_seq)) {
+               tp->rack.mstamp = xmit_time;
+               tp->rack.end_seq = end_seq;
+       }
 }
 
 /* We have waited long enough to accommodate reordering. Mark the expired
index 3d3092adf1d2d5962b5fc87bdf08419762d1b1ee..db84f523656ddf876e1971c416ee03a6a1794d9d 100644 (file)
@@ -904,7 +904,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
                if (t->parms.collect_md) {
                        tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
                        if (!tun_dst)
-                               return 0;
+                               goto drop;
                }
                ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
                                    log_ecn_error);
index be11dc13aa705145a83177e17d23594e9416e11a..1f04ec0e4a7aa2c11b8ee27cbdd4067b5bcf32e5 100644 (file)
@@ -1454,7 +1454,6 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               tcp_v6_fill_cb(skb, hdr, th);
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       hdr = ipv6_hdr(skb);
+                       tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       tcp_v6_fill_cb(skb, hdr, th);
-
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
@@ -1501,6 +1502,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
+       tcp_v6_fill_cb(skb, hdr, th);
 
        skb->dev = NULL;
 
index 0b750a22c4b9bf92e079fcd4a694fccf81f00a8e..d4e98f20fc2ac1c55a1f1db67498af900e0842ea 100644 (file)
@@ -1625,60 +1625,30 @@ static struct proto kcm_proto = {
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-                    struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
        struct socket *newsock;
        struct sock *newsk;
-       struct file *newfile;
-       int err, newfd;
 
-       err = -ENFILE;
        newsock = sock_alloc();
        if (!newsock)
-               goto out;
+               return ERR_PTR(-ENFILE);
 
        newsock->type = osock->type;
        newsock->ops = osock->ops;
 
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(0);
-       if (unlikely(newfd < 0)) {
-               err = newfd;
-               goto out_fd_fail;
-       }
-
-       newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-       if (IS_ERR(newfile)) {
-               err = PTR_ERR(newfile);
-               goto out_sock_alloc_fail;
-       }
-
        newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
                         &kcm_proto, true);
        if (!newsk) {
-               err = -ENOMEM;
-               goto out_sk_alloc_fail;
+               sock_release(newsock);
+               return ERR_PTR(-ENOMEM);
        }
-
        sock_init_data(newsock, newsk);
        init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-       fd_install(newfd, newfile);
-       *newsockp = newsock;
-       info->fd = newfd;
-
-       return 0;
-
-out_sk_alloc_fail:
-       fput(newfile);
-out_sock_alloc_fail:
-       put_unused_fd(newfd);
-out_fd_fail:
-       sock_release(newsock);
-out:
-       return err;
+       return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1678,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        }
        case SIOCKCMCLONE: {
                struct kcm_clone info;
-               struct socket *newsock = NULL;
-
-               err = kcm_clone(sock, &info, &newsock);
-               if (!err) {
-                       if (copy_to_user((void __user *)arg, &info,
-                                        sizeof(info))) {
-                               err = -EFAULT;
-                               sys_close(info.fd);
-                       }
-               }
+               struct file *file;
+
+               info.fd = get_unused_fd_flags(0);
+               if (unlikely(info.fd < 0))
+                       return info.fd;
 
+               file = kcm_clone(sock);
+               if (IS_ERR(file)) {
+                       put_unused_fd(info.fd);
+                       return PTR_ERR(file);
+               }
+               if (copy_to_user((void __user *)arg, &info,
+                                sizeof(info))) {
+                       put_unused_fd(info.fd);
+                       fput(file);
+                       return -EFAULT;
+               }
+               fd_install(info.fd, file);
+               err = 0;
                break;
        }
        default:
index 8886f15abe90e16005d7e02f3514476c68d2b0db..bc2f1e0977d657ec09f176cfcecf28839eb1fab0 100644 (file)
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
        long i;
        int ret;
 
-       if (rs->rs_bound_addr == 0) {
+       if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
index 8f7cf4c042be2b9b4379968655bea594a2928546..dcd818fa837e0af91978d6f1128085f93eb80f15 100644 (file)
@@ -860,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
+       struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -895,8 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
-       rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
-       rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rxnet->client_conn_reaper);
 
        rxrpc_put_local(rx->local);
        rx->local = NULL;
index b30a2c70bd489b36a2295a11707b6a36d4eb9ac0..531250fceb9e5a75d6a8b843e5e5fd9d481fddf2 100644 (file)
@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;
 
index 3839cbbdc32b1eadd2cae6a42a6b8c998ca88a15..cd1b200acae7415d5e26c8aa3dbfed602f5796b2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -277,6 +278,8 @@ unsigned long dev_trans_start(struct net_device *dev)
 
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
+       else if (netif_is_macvlan(dev))
+               dev = macvlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
index 17c7130454bd90e8af1d17e95f477ea558fb481d..bc30f9186ac67cd7b1c21d4d2b0035d3a6b886af 100644 (file)
@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
index 7f8ea9e297c36acd0969b0330ab479e0199f47ac..9d874e60e0323dee6bb7410b0ec34186eaac19d7 100644 (file)
@@ -212,6 +212,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
 
        if (ctl->limit > 0) {
                child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
index 09c1203c17119829d183fbdd0dfe9757460b863e..930e5bd26d3d7650a41b9472463c3fc39732495b 100644 (file)
@@ -639,6 +639,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
+       if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+                                       ctl_v1->Wlog))
+               return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
index 7b261afc47b9d709fdd780a93aaba874f35d79be..7f8baa48e7c2a834aea292106fd319c2489432a3 100644 (file)
@@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_delay = 1;
+       msg->abandoned = 0;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
 }
@@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        if (!chunk->asoc->peer.prsctp_capable)
                return 0;
 
+       if (chunk->msg->abandoned)
+               return 1;
+
+       if (!chunk->has_tsn &&
+           !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
+               return 0;
+
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                struct sctp_stream_out *streamout =
@@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                        streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                }
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
@@ -324,10 +333,12 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
                   chunk->msg->expires_at &&
                   time_after(jiffies, chunk->msg->expires_at)) {
+               chunk->msg->abandoned = 1;
                return 1;
        }
        /* PRIO policy is processed by sendmsg, not here */
index 4db012aa25f7a042f063bc17b56270effebc6cc6..7d67feeeffc1e758ae4be4ef1ddaea23276d1f5e 100644 (file)
@@ -364,10 +364,12 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
                struct sctp_stream_out *streamout;
 
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                list_del_init(&chk->transmitted_list);
                sctp_insert_list(&asoc->outqueue.abandoned,
                                 &chk->transmitted_list);
@@ -377,7 +379,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
                asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 
-               if (!chk->tsn_gap_acked) {
+               if (queue != &asoc->outqueue.retransmit &&
+                   !chk->tsn_gap_acked) {
                        if (chk->transport)
                                chk->transport->flight_size -=
                                                sctp_data_size(chk);
@@ -403,10 +406,13 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
        q->sched->unsched_all(&asoc->stream);
 
        list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
+                    !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                sctp_sched_dequeue_common(q, chk);
                asoc->sent_cnt_removable--;
                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -1434,7 +1440,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        /* If this chunk has not been acked, stop
                         * considering it as 'outstanding'.
                         */
-                       if (!tchunk->tsn_gap_acked) {
+                       if (transmitted_queue != &q->retransmit &&
+                           !tchunk->tsn_gap_acked) {
                                if (tchunk->transport)
                                        tchunk->transport->flight_size -=
                                                        sctp_data_size(tchunk);
index 014847e25648182dbf99d8fb095e094af76264bb..eb17a911aa29717ac0db25cf0662b8e24a420655 100644 (file)
@@ -5080,7 +5080,6 @@ static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *p
        *newfile = sock_alloc_file(newsock, 0, NULL);
        if (IS_ERR(*newfile)) {
                put_unused_fd(retval);
-               sock_release(newsock);
                retval = PTR_ERR(*newfile);
                *newfile = NULL;
                return retval;
index 42d8e9c9ccd5028793ebeb27fb319911a0f4ce35..05f361faec451cdd69168dd5e2cd7c2ca7c8f7fb 100644 (file)
@@ -406,8 +406,10 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
                name.len = strlen(name.name);
        }
        path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
-       if (unlikely(!path.dentry))
+       if (unlikely(!path.dentry)) {
+               sock_release(sock);
                return ERR_PTR(-ENOMEM);
+       }
        path.mnt = mntget(sock_mnt);
 
        d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -415,9 +417,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
        file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                  &socket_file_ops);
        if (IS_ERR(file)) {
-               /* drop dentry, keep inode */
+               /* drop dentry, keep inode for a bit */
                ihold(d_inode(path.dentry));
                path_put(&path);
+               /* ... and now kill it properly */
+               sock_release(sock);
                return file;
        }
 
@@ -1330,19 +1334,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
 
        retval = sock_create(family, type, protocol, &sock);
        if (retval < 0)
-               goto out;
-
-       retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
-       if (retval < 0)
-               goto out_release;
-
-out:
-       /* It may be already another descriptor 8) Not kernel problem. */
-       return retval;
+               return retval;
 
-out_release:
-       sock_release(sock);
-       return retval;
+       return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 }
 
 /*
@@ -1365,88 +1359,73 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
        if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
                flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
 
+       /*
+        * reserve descriptors and make sure we won't fail
+        * to return them to userland.
+        */
+       fd1 = get_unused_fd_flags(flags);
+       if (unlikely(fd1 < 0))
+               return fd1;
+
+       fd2 = get_unused_fd_flags(flags);
+       if (unlikely(fd2 < 0)) {
+               put_unused_fd(fd1);
+               return fd2;
+       }
+
+       err = put_user(fd1, &usockvec[0]);
+       if (err)
+               goto out;
+
+       err = put_user(fd2, &usockvec[1]);
+       if (err)
+               goto out;
+
        /*
         * Obtain the first socket and check if the underlying protocol
         * supports the socketpair call.
         */
 
        err = sock_create(family, type, protocol, &sock1);
-       if (err < 0)
+       if (unlikely(err < 0))
                goto out;
 
        err = sock_create(family, type, protocol, &sock2);
-       if (err < 0)
-               goto out_release_1;
-
-       err = sock1->ops->socketpair(sock1, sock2);
-       if (err < 0)
-               goto out_release_both;
-
-       fd1 = get_unused_fd_flags(flags);
-       if (unlikely(fd1 < 0)) {
-               err = fd1;
-               goto out_release_both;
+       if (unlikely(err < 0)) {
+               sock_release(sock1);
+               goto out;
        }
 
-       fd2 = get_unused_fd_flags(flags);
-       if (unlikely(fd2 < 0)) {
-               err = fd2;
-               goto out_put_unused_1;
+       err = sock1->ops->socketpair(sock1, sock2);
+       if (unlikely(err < 0)) {
+               sock_release(sock2);
+               sock_release(sock1);
+               goto out;
        }
 
        newfile1 = sock_alloc_file(sock1, flags, NULL);
        if (IS_ERR(newfile1)) {
                err = PTR_ERR(newfile1);
-               goto out_put_unused_both;
+               sock_release(sock2);
+               goto out;
        }
 
        newfile2 = sock_alloc_file(sock2, flags, NULL);
        if (IS_ERR(newfile2)) {
                err = PTR_ERR(newfile2);
-               goto out_fput_1;
+               fput(newfile1);
+               goto out;
        }
 
-       err = put_user(fd1, &usockvec[0]);
-       if (err)
-               goto out_fput_both;
-
-       err = put_user(fd2, &usockvec[1]);
-       if (err)
-               goto out_fput_both;
-
        audit_fd_pair(fd1, fd2);
 
        fd_install(fd1, newfile1);
        fd_install(fd2, newfile2);
-       /* fd1 and fd2 may be already another descriptors.
-        * Not kernel problem.
-        */
-
        return 0;
 
-out_fput_both:
-       fput(newfile2);
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       goto out;
-
-out_fput_1:
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       sock_release(sock2);
-       goto out;
-
-out_put_unused_both:
+out:
        put_unused_fd(fd2);
-out_put_unused_1:
        put_unused_fd(fd1);
-out_release_both:
-       sock_release(sock2);
-out_release_1:
-       sock_release(sock1);
-out:
        return err;
 }
 
@@ -1562,7 +1541,6 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        if (IS_ERR(newfile)) {
                err = PTR_ERR(newfile);
                put_unused_fd(newfd);
-               sock_release(newsock);
                goto out_put;
        }
 
index a801da812f8660246df1ac0949cf66c99e991907..e2a4184f3c5df94b953bbbfe8782edd185959f72 100644 (file)
@@ -1841,6 +1841,7 @@ call_bind_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -ENOTCONN:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -ENOBUFS:
@@ -1917,6 +1918,7 @@ call_connect_status(struct rpc_task *task)
                /* fall through */
        case -ECONNRESET:
        case -ECONNABORTED:
+       case -ENETDOWN:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -EADDRINUSE:
@@ -2022,6 +2024,7 @@ call_transmit_status(struct rpc_task *task)
                 */
        case -ECONNREFUSED:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
@@ -2071,6 +2074,7 @@ call_bc_transmit(struct rpc_task *task)
        switch (task->tk_status) {
        case 0:
                /* Success */
+       case -ENETDOWN:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
@@ -2139,6 +2143,7 @@ call_status(struct rpc_task *task)
        task->tk_status = 0;
        switch(status) {
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
index 9cc850c2719e7da3216f3fdba66b752e99152b76..6d0cc3b8f932c35f47adb2b08a6a34a755ef8900 100644 (file)
@@ -2440,7 +2440,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
                 */
        case -ECONNREFUSED:
        case -ECONNRESET:
+       case -ENETDOWN:
        case -ENETUNREACH:
+       case -EHOSTUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
                /*
index acaef80fb88cfca4ea569a003f52ca04a3e2f577..d60c303423275db5c0c7c45c9bf1a7d61987bf27 100644 (file)
@@ -314,6 +314,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
        newcon->usr_data = s->tipc_conn_new(newcon->conid);
        if (!newcon->usr_data) {
                sock_release(newsock);
+               conn_put(newcon);
                return -ENOMEM;
        }
 
@@ -511,7 +512,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
        s = con->server;
        scbr = s->tipc_conn_new(*conid);
        if (!scbr) {
-               tipc_close_conn(con);
+               conn_put(con);
                return false;
        }
 
index ecca64fc6a6f223bf8c0e09e3a299fe4fd62509d..3deabcab4882165b668f65319a3555027bf3b292 100644 (file)
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
                        goto rcu_out;
        }
 
-       tipc_rcv(sock_net(sk), skb, b);
-       rcu_read_unlock();
-       return 0;
-
 rcu_out:
        rcu_read_unlock();
 out:
index 5583df708b8cfedb61ca2e6fec93a79789d6f949..a827547aa102be4f3cf46a267c8c684e815e02d6 100644 (file)
@@ -487,7 +487,7 @@ static void hvs_release(struct vsock_sock *vsk)
 
        lock_sock(sk);
 
-       sk->sk_state = SS_DISCONNECTING;
+       sk->sk_state = TCP_CLOSING;
        vsock_remove_sock(vsk);
 
        release_sock(sk);
index 522ca9252d6cd41f5272b4755da876ff93cffdbb..242631aa4ea2366081711ba96284f189c9755569 100644 (file)
@@ -193,8 +193,18 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                return -1;
        }
        event_fd[prog_cnt - 1] = efd;
-       ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
-       ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
+       err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
 
        return 0;
 }
index 6f099f915dcfe15a46541af65d39327e6494f21f..94b664817ad91e2e48c8fef6361a20ab2a632763 100755 (executable)
@@ -83,8 +83,11 @@ def print_result(symboltype, symbolformat, argc):
     for d, n in delta:
         if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
 
-    print("Total: Before=%d, After=%d, chg %+.2f%%" % \
-        (otot, ntot, (ntot - otot)*100.0/otot))
+    if otot:
+        percent = (ntot - otot) * 100.0 / otot
+    else:
+        percent = 0
+    print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
 
 if sys.argv[1] == "-c":
     print_result("Function", "tT", 3)
index 1f5ce959f5965b0249f70786e44f472581625955..39e07d8574dd787c2af71937852156abb9b1a7fb 100755 (executable)
 set -o errexit
 set -o nounset
 
+READELF="${CROSS_COMPILE}readelf"
+ADDR2LINE="${CROSS_COMPILE}addr2line"
+SIZE="${CROSS_COMPILE}size"
+NM="${CROSS_COMPILE}nm"
+
 command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
 
 usage() {
        echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
@@ -69,10 +76,10 @@ die() {
 find_dir_prefix() {
        local objfile=$1
 
-       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
        [[ -z $start_kernel_addr ]] && return
 
-       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
        [[ -z $file_line ]] && return
 
        local prefix=${file_line%init/main.c:*}
@@ -104,7 +111,7 @@ __faddr2line() {
 
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates.
-       file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+       file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
        while read symbol; do
                local fields=($symbol)
                local sym_base=0x${fields[0]}
@@ -156,10 +163,10 @@ __faddr2line() {
 
                # pass real address to addr2line
                echo "$func+$offset/$sym_size:"
-               addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+               ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
                DONE=1
 
-       done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+       done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
 }
 
 [[ $# -lt 2 ]] && usage
index 620e811696592ddec6fb221e2bec07d436f258b4..4ac095118717022cfb3ea564b5ec37deaad91a90 100644 (file)
@@ -121,17 +121,19 @@ struct apparmor_audit_data {
                /* these entries require a custom callback fn */
                struct {
                        struct aa_label *peer;
-                       struct {
-                               const char *target;
-                               kuid_t ouid;
-                       } fs;
+                       union {
+                               struct {
+                                       const char *target;
+                                       kuid_t ouid;
+                               } fs;
+                               int signal;
+                       };
                };
                struct {
                        struct aa_profile *profile;
                        const char *ns;
                        long pos;
                } iface;
-               int signal;
                struct {
                        int rlim;
                        unsigned long max;
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..cefe7c7
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..543dd70
--- /dev/null
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *  S390 version
+ *    Copyright IBM Corp. 1999, 2000
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _UAPI_S390_PTRACE_H
+#define _UAPI_S390_PTRACE_H
+
+/*
+ * Offsets in the user_regs_struct. They are used for the ptrace
+ * system call and in entry.S
+ */
+#ifndef __s390x__
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x04
+#define PT_GPR0     0x08
+#define PT_GPR1     0x0C
+#define PT_GPR2     0x10
+#define PT_GPR3     0x14
+#define PT_GPR4     0x18
+#define PT_GPR5     0x1C
+#define PT_GPR6     0x20
+#define PT_GPR7     0x24
+#define PT_GPR8     0x28
+#define PT_GPR9     0x2C
+#define PT_GPR10    0x30
+#define PT_GPR11    0x34
+#define PT_GPR12    0x38
+#define PT_GPR13    0x3C
+#define PT_GPR14    0x40
+#define PT_GPR15    0x44
+#define PT_ACR0     0x48
+#define PT_ACR1     0x4C
+#define PT_ACR2     0x50
+#define PT_ACR3     0x54
+#define PT_ACR4            0x58
+#define PT_ACR5            0x5C
+#define PT_ACR6            0x60
+#define PT_ACR7            0x64
+#define PT_ACR8            0x68
+#define PT_ACR9            0x6C
+#define PT_ACR10    0x70
+#define PT_ACR11    0x74
+#define PT_ACR12    0x78
+#define PT_ACR13    0x7C
+#define PT_ACR14    0x80
+#define PT_ACR15    0x84
+#define PT_ORIGGPR2 0x88
+#define PT_FPC     0x90
+/*
+ * A nasty fact of life that the ptrace api
+ * only supports passing of longs.
+ */
+#define PT_FPR0_HI  0x98
+#define PT_FPR0_LO  0x9C
+#define PT_FPR1_HI  0xA0
+#define PT_FPR1_LO  0xA4
+#define PT_FPR2_HI  0xA8
+#define PT_FPR2_LO  0xAC
+#define PT_FPR3_HI  0xB0
+#define PT_FPR3_LO  0xB4
+#define PT_FPR4_HI  0xB8
+#define PT_FPR4_LO  0xBC
+#define PT_FPR5_HI  0xC0
+#define PT_FPR5_LO  0xC4
+#define PT_FPR6_HI  0xC8
+#define PT_FPR6_LO  0xCC
+#define PT_FPR7_HI  0xD0
+#define PT_FPR7_LO  0xD4
+#define PT_FPR8_HI  0xD8
+#define PT_FPR8_LO  0XDC
+#define PT_FPR9_HI  0xE0
+#define PT_FPR9_LO  0xE4
+#define PT_FPR10_HI 0xE8
+#define PT_FPR10_LO 0xEC
+#define PT_FPR11_HI 0xF0
+#define PT_FPR11_LO 0xF4
+#define PT_FPR12_HI 0xF8
+#define PT_FPR12_LO 0xFC
+#define PT_FPR13_HI 0x100
+#define PT_FPR13_LO 0x104
+#define PT_FPR14_HI 0x108
+#define PT_FPR14_LO 0x10C
+#define PT_FPR15_HI 0x110
+#define PT_FPR15_LO 0x114
+#define PT_CR_9            0x118
+#define PT_CR_10    0x11C
+#define PT_CR_11    0x120
+#define PT_IEEE_IP  0x13C
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x140-1
+
+#define GPR_SIZE       4
+#define CR_SIZE                4
+
+#define STACK_FRAME_OVERHEAD   96      /* size of minimum stack frame */
+
+#else /* __s390x__ */
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x08
+#define PT_GPR0     0x10
+#define PT_GPR1     0x18
+#define PT_GPR2     0x20
+#define PT_GPR3     0x28
+#define PT_GPR4     0x30
+#define PT_GPR5     0x38
+#define PT_GPR6     0x40
+#define PT_GPR7     0x48
+#define PT_GPR8     0x50
+#define PT_GPR9     0x58
+#define PT_GPR10    0x60
+#define PT_GPR11    0x68
+#define PT_GPR12    0x70
+#define PT_GPR13    0x78
+#define PT_GPR14    0x80
+#define PT_GPR15    0x88
+#define PT_ACR0     0x90
+#define PT_ACR1     0x94
+#define PT_ACR2     0x98
+#define PT_ACR3     0x9C
+#define PT_ACR4            0xA0
+#define PT_ACR5            0xA4
+#define PT_ACR6            0xA8
+#define PT_ACR7            0xAC
+#define PT_ACR8            0xB0
+#define PT_ACR9            0xB4
+#define PT_ACR10    0xB8
+#define PT_ACR11    0xBC
+#define PT_ACR12    0xC0
+#define PT_ACR13    0xC4
+#define PT_ACR14    0xC8
+#define PT_ACR15    0xCC
+#define PT_ORIGGPR2 0xD0
+#define PT_FPC     0xD8
+#define PT_FPR0     0xE0
+#define PT_FPR1     0xE8
+#define PT_FPR2     0xF0
+#define PT_FPR3     0xF8
+#define PT_FPR4     0x100
+#define PT_FPR5     0x108
+#define PT_FPR6     0x110
+#define PT_FPR7     0x118
+#define PT_FPR8     0x120
+#define PT_FPR9     0x128
+#define PT_FPR10    0x130
+#define PT_FPR11    0x138
+#define PT_FPR12    0x140
+#define PT_FPR13    0x148
+#define PT_FPR14    0x150
+#define PT_FPR15    0x158
+#define PT_CR_9     0x160
+#define PT_CR_10    0x168
+#define PT_CR_11    0x170
+#define PT_IEEE_IP  0x1A8
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x1B0-1
+
+#define GPR_SIZE       8
+#define CR_SIZE                8
+
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
+
+#endif /* __s390x__ */
+
+#define NUM_GPRS       16
+#define NUM_FPRS       16
+#define NUM_CRS                16
+#define NUM_ACRS       16
+
+#define NUM_CR_WORDS   3
+
+#define FPR_SIZE       8
+#define FPC_SIZE       4
+#define FPC_PAD_SIZE   4 /* gcc insists on aligning the fpregs */
+#define ACR_SIZE       4
+
+
+#define PTRACE_OLDSETOPTIONS        21
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
+       struct
+       {
+               __u32 hi;
+               __u32 lo;
+       } fp;
+} freg_t;
+
+typedef struct {
+       __u32   fpc;
+       __u32   pad;
+       freg_t  fprs[NUM_FPRS];
+} s390_fp_regs;
+
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
+
+/* this typedef defines how a Program Status Word looks like */
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
+} __attribute__ ((aligned(8))) psw_t;
+
+#ifndef __s390x__
+
+#define PSW_MASK_PER           0x40000000UL
+#define PSW_MASK_DAT           0x04000000UL
+#define PSW_MASK_IO            0x02000000UL
+#define PSW_MASK_EXT           0x01000000UL
+#define PSW_MASK_KEY           0x00F00000UL
+#define PSW_MASK_BASE          0x00080000UL    /* always one */
+#define PSW_MASK_MCHECK                0x00040000UL
+#define PSW_MASK_WAIT          0x00020000UL
+#define PSW_MASK_PSTATE                0x00010000UL
+#define PSW_MASK_ASC           0x0000C000UL
+#define PSW_MASK_CC            0x00003000UL
+#define PSW_MASK_PM            0x00000F00UL
+#define PSW_MASK_RI            0x00000000UL
+#define PSW_MASK_EA            0x00000000UL
+#define PSW_MASK_BA            0x00000000UL
+
+#define PSW_MASK_USER          0x0000FF00UL
+
+#define PSW_ADDR_AMODE         0x80000000UL
+#define PSW_ADDR_INSN          0x7FFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 20)
+
+#define PSW_ASC_PRIMARY                0x00000000UL
+#define PSW_ASC_ACCREG         0x00004000UL
+#define PSW_ASC_SECONDARY      0x00008000UL
+#define PSW_ASC_HOME           0x0000C000UL
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER           0x4000000000000000UL
+#define PSW_MASK_DAT           0x0400000000000000UL
+#define PSW_MASK_IO            0x0200000000000000UL
+#define PSW_MASK_EXT           0x0100000000000000UL
+#define PSW_MASK_BASE          0x0000000000000000UL
+#define PSW_MASK_KEY           0x00F0000000000000UL
+#define PSW_MASK_MCHECK                0x0004000000000000UL
+#define PSW_MASK_WAIT          0x0002000000000000UL
+#define PSW_MASK_PSTATE                0x0001000000000000UL
+#define PSW_MASK_ASC           0x0000C00000000000UL
+#define PSW_MASK_CC            0x0000300000000000UL
+#define PSW_MASK_PM            0x00000F0000000000UL
+#define PSW_MASK_RI            0x0000008000000000UL
+#define PSW_MASK_EA            0x0000000100000000UL
+#define PSW_MASK_BA            0x0000000080000000UL
+
+#define PSW_MASK_USER          0x0000FF0180000000UL
+
+#define PSW_ADDR_AMODE         0x0000000000000000UL
+#define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 52)
+
+#define PSW_ASC_PRIMARY                0x0000000000000000UL
+#define PSW_ASC_ACCREG         0x0000400000000000UL
+#define PSW_ASC_SECONDARY      0x0000800000000000UL
+#define PSW_ASC_HOME           0x0000C00000000000UL
+
+#endif /* __s390x__ */
+
+
+/*
+ * The s390_regs structure is used to define the elf_gregset_t.
+ */
+typedef struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+} s390_regs;
+
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
+/*
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
+ */
+typedef struct {
+       unsigned long cr[NUM_CR_WORDS];
+} per_cr_words;
+
+#define PER_EM_MASK 0xE8000000UL
+
+typedef struct {
+#ifdef __s390x__
+       unsigned                       : 32;
+#endif /* __s390x__ */
+       unsigned em_branching          : 1;
+       unsigned em_instruction_fetch  : 1;
+       /*
+        * Switching on storage alteration automatically fixes
+        * the storage alteration event bit in the users std.
+        */
+       unsigned em_storage_alteration : 1;
+       unsigned em_gpr_alt_unused     : 1;
+       unsigned em_store_real_address : 1;
+       unsigned                       : 3;
+       unsigned branch_addr_ctl       : 1;
+       unsigned                       : 1;
+       unsigned storage_alt_space_ctl : 1;
+       unsigned                       : 21;
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+} per_cr_bits;
+
+typedef struct {
+       unsigned short perc_atmid;
+       unsigned long address;
+       unsigned char access_id;
+} per_lowcore_words;
+
+typedef struct {
+       unsigned perc_branching          : 1;
+       unsigned perc_instruction_fetch  : 1;
+       unsigned perc_storage_alteration : 1;
+       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_store_real_address : 1;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
+       unsigned long address;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
+} per_lowcore_bits;
+
+typedef struct {
+       union {
+               per_cr_words   words;
+               per_cr_bits    bits;
+       } control_regs;
+       /*
+        * The single_step and instruction_fetch bits are obsolete,
+        * the kernel always sets them to zero. To enable single
+        * stepping use ptrace(PTRACE_SINGLESTEP) instead.
+        */
+       unsigned  single_step       : 1;
+       unsigned  instruction_fetch : 1;
+       unsigned                    : 30;
+       /*
+        * These addresses are copied into cr10 & cr11 if single
+        * stepping is switched off
+        */
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+       union {
+               per_lowcore_words words;
+               per_lowcore_bits  bits;
+       } lowcore;
+} per_struct;
+
+typedef struct {
+       unsigned int  len;
+       unsigned long kernel_addr;
+       unsigned long process_addr;
+} ptrace_area;
+
+/*
+ * S/390 specific non posix ptrace requests. I chose unusual values so
+ * they are unlikely to clash with future ptrace definitions.
+ */
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
+#define PTRACE_PEEKTEXT_AREA         0x5002
+#define PTRACE_PEEKDATA_AREA         0x5003
+#define PTRACE_POKETEXT_AREA         0x5004
+#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_GET_LAST_BREAK        0x5006
+#define PTRACE_PEEK_SYSTEM_CALL       0x5007
+#define PTRACE_POKE_SYSTEM_CALL              0x5008
+#define PTRACE_ENABLE_TE             0x5009
+#define PTRACE_DISABLE_TE            0x5010
+#define PTRACE_TE_ABORT_RAND         0x5011
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+
+/*
+ * PT_PROT definition is loosely based on hppa bsd definition in
+ * gdb/hppab-nat.c
+ */
+#define PTRACE_PROT                      21
+
+typedef enum {
+       ptprot_set_access_watchpoint,
+       ptprot_set_write_watchpoint,
+       ptprot_disable_watchpoint
+} ptprot_flags;
+
+typedef struct {
+       unsigned long lowaddr;
+       unsigned long hiaddr;
+       ptprot_flags prot;
+} ptprot_area;
+
+/* Sequence of bytes for breakpoint illegal instruction.  */
+#define S390_BREAKPOINT     {0x0,0x1}
+#define S390_BREAKPOINT_U16 ((__u16)0x0001)
+#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
+#define S390_SYSCALL_SIZE   2
+
+/*
+ * The user_regs_struct defines the way the user registers are
+ * store on the stack for signal handling.
+ */
+struct user_regs_struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+       s390_fp_regs fp_regs;
+       /*
+        * These per registers are in here so that gdb can modify them
+        * itself as there is no "official" ptrace interface for hardware
+        * watchpoints. This is the way intel does it.
+        */
+       per_struct per_info;
+       unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_S390_PTRACE_H */
index bde77d7c4390ab0a6d14332cceca7dc7c8571894..37292bb5ce6065c31c21b2af61fa077de432a081 100644 (file)
@@ -6,7 +6,7 @@ RM ?= rm -f
 
 # Make the path relative to DESTDIR, not prefix
 ifndef DESTDIR
-prefix?=$(HOME)
+prefix ?= /usr/local
 endif
 mandir ?= $(prefix)/share/man
 man8dir = $(mandir)/man8
index 813826c50936b174ce11821b0e606ce0e49668cd..ec3052c0b004011573861231edcdd0d0e982c088 100644 (file)
@@ -45,8 +45,8 @@ $(LIBBPF)-clean:
        $(call QUIET_CLEAN, libbpf)
        $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
 
-prefix = /usr
-bash_compdir ?= $(prefix)/share/bash-completion/completions
+prefix = /usr/local
+bash_compdir ?= /usr/share/bash-completion/completions
 
 CC = gcc
 
@@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
        $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
 
 install:
+       install -m 0755 -d $(prefix)/sbin
        install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
        install -m 0755 -d $(bash_compdir)
        install -m 0644 bash-completion/bpftool $(bash_compdir)
@@ -88,5 +89,5 @@ doc-install:
 
 FORCE:
 
-.PHONY: all clean FORCE
+.PHONY: all clean FORCE install doc doc-install
 .DEFAULT_GOAL := all
index d6e4762170a4464d029415edc189030f55841844..d294bc8168bed8cc72f8926c6947b98aba0cbcc6 100644 (file)
@@ -58,11 +58,19 @@ bool show_pinned;
 struct pinned_obj_table prog_table;
 struct pinned_obj_table map_table;
 
+static void __noreturn clean_and_exit(int i)
+{
+       if (json_output)
+               jsonw_destroy(&json_wtr);
+
+       exit(i);
+}
+
 void usage(void)
 {
        last_do_help(last_argc - 1, last_argv + 1);
 
-       exit(-1);
+       clean_and_exit(-1);
 }
 
 static int do_help(int argc, char **argv)
@@ -280,6 +288,7 @@ int main(int argc, char **argv)
        hash_init(prog_table.table);
        hash_init(map_table.table);
 
+       opterr = 0;
        while ((opt = getopt_long(argc, argv, "Vhpjf",
                                  options, NULL)) >= 0) {
                switch (opt) {
@@ -291,13 +300,25 @@ int main(int argc, char **argv)
                        pretty_output = true;
                        /* fall through */
                case 'j':
-                       json_output = true;
+                       if (!json_output) {
+                               json_wtr = jsonw_new(stdout);
+                               if (!json_wtr) {
+                                       p_err("failed to create JSON writer");
+                                       return -1;
+                               }
+                               json_output = true;
+                       }
+                       jsonw_pretty(json_wtr, pretty_output);
                        break;
                case 'f':
                        show_pinned = true;
                        break;
                default:
-                       usage();
+                       p_err("unrecognized option '%s'", argv[optind - 1]);
+                       if (json_output)
+                               clean_and_exit(-1);
+                       else
+                               usage();
                }
        }
 
@@ -306,15 +327,6 @@ int main(int argc, char **argv)
        if (argc < 0)
                usage();
 
-       if (json_output) {
-               json_wtr = jsonw_new(stdout);
-               if (!json_wtr) {
-                       p_err("failed to create JSON writer");
-                       return -1;
-               }
-               jsonw_pretty(json_wtr, pretty_output);
-       }
-
        bfd_init();
 
        ret = cmd_select(cmds, argc, argv, do_help);
index 9c191e222d6f824d5d5be3d84611a551a6dbdeca..bff330b49791e5bc6021d8188bd0e637db20ea3b 100644 (file)
@@ -41,6 +41,7 @@
 #include <stdbool.h>
 #include <stdio.h>
 #include <linux/bpf.h>
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/hashtable.h>
 
@@ -50,7 +51,7 @@
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
-#define BAD_ARG()      ({ p_err("what is '%s'?\n", *argv); -1; })
+#define BAD_ARG()      ({ p_err("what is '%s'?", *argv); -1; })
 
 #define ERR_MAX_LEN    1024
 
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
 
 bool is_prefix(const char *pfx, const char *str);
 void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
-void usage(void) __attribute__((noreturn));
+void usage(void) __noreturn;
 
 struct pinned_obj_table {
        DECLARE_HASHTABLE(table, 16);
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 0674272598205c300e7f573e38dae6087de94429..8f95303f9d807d10d4fd6850d91a2486b0a490ec 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
index 21322e0385b886667d7bbd9a17edddc1ad1b3c8f..09ba923debe86810f8380f7df54504dee4232ec8 100644 (file)
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
 HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
index f47576ce13ea9da3d6220894c8c1d191a5fe889b..a8ace5cc6301f184e5d37210e00402234a014a4b 100644 (file)
@@ -2,17 +2,43 @@
 /*
  * Mapping of DWARF debug register numbers into register names.
  *
- *    Copyright IBM Corp. 2010
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Copyright IBM Corp. 2010, 2017
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *           Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *
  */
 
+#include <errno.h>
 #include <stddef.h>
-#include <dwarf-regs.h>
+#include <stdlib.h>
 #include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <string.h>
+#include <dwarf-regs.h>
 #include "dwarf-regs-table.h"
 
 const char *get_arch_regstr(unsigned int n)
 {
        return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
 }
+
+/*
+ * Convert the register name into an offset to struct pt_regs (kernel).
+ * This is required by the BPF prologue generator.  The BPF
+ * program is called in the BPF overflow handler in the perf
+ * core.
+ */
+int regs_query_register_offset(const char *name)
+{
+       unsigned long gpr;
+
+       if (!name || strncmp(name, "%r", 2))
+               return -EINVAL;
+
+       errno = 0;
+       gpr = strtoul(name + 2, NULL, 10);
+       if (errno || gpr >= 16)
+               return -EINVAL;
+
+       return offsetof(user_pt_regs, gprs) + 8 * gpr;
+}
index 77406d25e5218023bf15277f293338e4279fe144..6db9d809fe9722a9e4eb0afb5140443490aa43e3 100755 (executable)
@@ -30,6 +30,7 @@ arch/x86/include/uapi/asm/vmx.h
 arch/powerpc/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
index c25a74ae51baef13bfa5609d2957af76941597f6..2bb3eef7d5c1fbf36d420be2801e74b4f7eeb049 100644 (file)
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
 
        dprintf("set %s as cpufreq governor\n", governor);
 
-       if (cpupower_is_cpu_online(cpu) != 0) {
+       if (cpupower_is_cpu_online(cpu) != 1) {
                perror("cpufreq_cpu_exists");
                fprintf(stderr, "error: cpu %u does not exist\n", cpu);
                return -1;
index 1b5da0066ebf90bfe4c441fab62fd26b4cc99267..5b3205f1621749bb6ebc340413ae16d957064cb9 100644 (file)
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
 {
        int num;
        char *tmp;
+       int this_cpu;
+
+       this_cpu = sched_getcpu();
 
        /* Assume idle state count is the same for all CPUs */
-       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
 
        if (cpuidle_sysfs_monitor.hw_states_num <= 0)
                return NULL;
 
        for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
-               tmp = cpuidle_state_name(0, num);
+               tmp = cpuidle_state_name(this_cpu, num);
                if (tmp == NULL)
                        continue;
 
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
                strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
                free(tmp);
 
-               tmp = cpuidle_state_desc(0, num);
+               tmp = cpuidle_state_desc(this_cpu, num);
                if (tmp == NULL)
                        continue;
                strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
index 333a48655ee0a4cfadd89a478b09f1be7dba42b5..21a2d76b67dc453b13fde4ae760b275f04285d06 100644 (file)
@@ -1,7 +1,19 @@
 # SPDX-License-Identifier: GPL-2.0
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+include $(srctree)/tools/scripts/Makefile.arch
+
+$(call detected_var,SRCARCH)
+
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
 APIDIR := ../../../include/uapi
+ASMDIR:= ../../../arch/$(ARCH)/include/uapi
 GENDIR := ../../../../include/generated
 GENHDR := $(GENDIR)/autoconf.h
 
@@ -9,7 +21,7 @@ ifneq ($(wildcard $(GENHDR)),)
   GENFLAGS := -DHAVE_GENHDR
 endif
 
-CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
 LDLIBS += -lcap -lelf
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
index 3cc0b561489ea2c1b54701f1aaaf580e79c2535b..e9626cf5607ad060b070680d25986a270c5cd59c 100644 (file)
@@ -3,6 +3,8 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
 
 int main(int argc, char **argv)
 {
+       struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
        char full_log[LOG_SIZE];
        char log[LOG_SIZE];
        size_t want_len;
        int i;
 
+       /* allow unlimited locked memory to have more consistent error code */
+       if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+               perror("Unable to lift memlock rlimit");
+
        memset(log, 1, LOG_SIZE);
 
        /* Test incorrect attr */
index a6524ff27de495460414b286f8181762674a84a3..a67c106d73f5c33a039d046c9f46454bbcfc801f 100644 (file)
@@ -615,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int ret;
-       sigset_t sigsaved;
 
        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
@@ -633,8 +632,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (run->immediate_exit)
                return -EINTR;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -769,8 +767,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                kvm_pmu_update_run(vcpu);
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
+
        return ret;
 }
 
index f169ecc4f2e87f44ece32540b8428529aa01ae84..c422c10cd1dd176a973b234f742414cd2443cbff 100644 (file)
@@ -2065,6 +2065,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       /*
+        * This does a lockless modification of ->real_blocked, which is fine
+        * because, only current can change ->real_blocked and all readers of
+        * ->real_blocked don't care as long ->real_blocked is always a subset
+        * of ->blocked.
+        */
+       sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+       sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
        unsigned int old, val, grow;