git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
Merge branch 'pm-devfreq'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 16 May 2016 12:31:15 +0000 (14:31 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 16 May 2016 12:31:15 +0000 (14:31 +0200)
* pm-devfreq:
  PM / devfreq: style/typo fixes
  PM / devfreq: exynos: Add the detailed correlation for Exynos5422 bus
  PM / devfreq: event: Find the instance of devfreq-event device by using phandle
  PM / devfreq: event: Add new Exynos NoC probe driver
  MAINTAINERS: Add samsung bus frequency driver entry
  PM / devfreq: exynos: Remove unused exynos4/5 busfreq driver
  PM / devfreq: exynos: Add the detailed correlation between sub-blocks and power line
  PM / devfreq: exynos: Update documentation for bus devices using passive governor
  PM / devfreq: exynos: Add support of bus frequency of sub-blocks using passive governor
  PM / devfreq: Add new passive governor
  PM / devfreq: Add new DEVFREQ_TRANSITION_NOTIFIER notifier
  PM / devfreq: Add devfreq_get_devfreq_by_phandle()
  PM / devfreq: exynos: Add documentation for generic exynos bus frequency driver
  PM / devfreq: exynos: Add generic exynos bus frequency driver
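
The branch above introduces three cooperating pieces: devfreq_get_devfreq_by_phandle() to resolve a parent devfreq device from DT, the DEVFREQ_TRANSITION_NOTIFIER chain (DEVFREQ_PRECHANGE/DEVFREQ_POSTCHANGE events), and the passive governor that scales a child device in lockstep with its parent. A minimal sketch of how a client driver might wire them together; the demo_* names and the bare-bones profile are illustrative assumptions, not code from this merge:

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver state; only the devfreq calls mirror this branch. */
struct demo_bus {
	struct devfreq *devfreq;
	struct devfreq_dev_profile profile;
	struct devfreq_passive_data passive_data;
};

static int demo_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* A real driver would program clocks/regulators for *freq here. */
	return 0;
}

static int demo_bus_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct devfreq *parent;
	struct demo_bus *bus;

	bus = devm_kzalloc(dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	/* Resolve the parent devfreq device named by the "devfreq" phandle
	 * in this device's DT node (new helper from this branch). */
	parent = devfreq_get_devfreq_by_phandle(dev, 0);
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	bus->profile.target = demo_bus_target;
	bus->passive_data.parent = parent;

	/* The passive governor follows the parent's frequency; internally it
	 * relies on the new DEVFREQ_TRANSITION_NOTIFIER to track the
	 * parent's transitions. */
	bus->devfreq = devm_devfreq_add_device(dev, &bus->profile, "passive",
					       &bus->passive_data);
	return PTR_ERR_OR_ZERO(bus->devfreq);
}

The generic exynos-bus driver added by this series follows the same shape, with real OPP and clock handling in its target callback.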

294 files changed:
.mailmap
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/kernel-parameters.txt
Documentation/networking/altera_tse.txt
Documentation/networking/ipvlan.txt
Documentation/networking/pktgen.txt
Documentation/networking/vrf.txt
Documentation/networking/xfrm_sync.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/io.h
arch/arc/include/asm/mmzone.h [new file with mode: 0644]
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/mm/init.c
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/sun8i-q8-common.dtsi
arch/arm/include/asm/cpuidle.h
arch/arm/include/asm/domain.h
arch/arm/kernel/cpuidle.c
arch/arm/kernel/head-nommu.S
arch/arm/kvm/mmu.c
arch/arm/mach-berlin/berlin.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/common.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-imx/imx27-dt.c
arch/arm/mach-imx/mach-imx51.c
arch/arm/mach-imx/mach-imx53.c
arch/arm/mach-imx/mach-imx7d.c
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-rockchip/rockchip.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/common.h
arch/arm/mach-shmobile/cpufreq.c [deleted file]
arch/arm/mach-socfpga/headsmp.S
arch/arm/mach-sunxi/sunxi.c
arch/arm/mach-zynq/common.c
arch/arm/mm/nommu.c
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/parisc/kernel/syscall.S
arch/powerpc/include/asm/word-at-a-time.h
arch/sparc/configs/sparc32_defconfig
arch/sparc/configs/sparc64_defconfig
arch/sparc/include/asm/spitfire.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/cherrs.S
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/fpu_traps.S
arch/sparc/kernel/head_64.S
arch/sparc/kernel/misctrap.S
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/spiterrs.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/utrap.S
arch/sparc/kernel/vio.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/winfixup.S
arch/sparc/mm/init_64.c
arch/x86/events/amd/iommu.c
arch/x86/events/intel/core.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/tsc_msr.c
arch/x86/kvm/mmu.c
arch/x86/platform/efi/efi-bgrt.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/nfit.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci_platform.c
drivers/ata/ahci_seattle.c [new file with mode: 0644]
drivers/ata/libahci.c
drivers/base/power/opp/Makefile
drivers/base/power/opp/core.c
drivers/base/power/opp/cpu.c
drivers/base/power/opp/of.c [new file with mode: 0644]
drivers/base/power/opp/opp.h
drivers/base/property.c
drivers/clk/imx/clk-imx6q.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little.h
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c [new file with mode: 0644]
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq-nforce2.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_governor_attr_set.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_userspace.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/hisi-acpu-cpufreq.c [deleted file]
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/mt8173-cpufreq.c
drivers/cpufreq/mvebu-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/pmac64-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.h
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/qoriq-cpufreq.c
drivers/cpufreq/s3c2412-cpufreq.c
drivers/cpufreq/s3c2440-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-lib.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/sti-cpufreq.c
drivers/cpufreq/tegra124-cpufreq.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/cpuidle/cpuidle.c
drivers/firmware/psci.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/idle/intel_idle.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
drivers/iio/magnetometer/ak8975.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/input/misc/twl6040-vibra.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/zforce_ts.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid5.c
drivers/media/media-device.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/s3c-camif/camif-core.c
drivers/misc/mic/vop/vop_vringh.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/phy/at803x.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/nvdimm/pmem.c
drivers/nvmem/mxs-ocotp.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/soc/qcom/spm.c
drivers/usb/core/port.c
drivers/usb/core/usb.c
drivers/usb/musb/jz4740.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/serial/cp210x.c
drivers/virtio/virtio_ring.c
drivers/xen/balloon.c
drivers/xen/evtchn.c
fs/fuse/file.c
fs/pnode.c
fs/proc/base.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/udf/unicode.c
include/acpi/acpi_bus.h
include/linux/bpf.h
include/linux/cpufreq-dt.h [deleted file]
include/linux/cpufreq.h
include/linux/hash.h
include/linux/if_ether.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of.h
include/linux/page-flags.h
include/linux/pm_opp.h
include/linux/rio_mport_cdev.h [deleted file]
include/linux/sched.h
include/linux/swap.h
include/net/vxlan.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/rio_mport_cdev.h [new file with mode: 0644]
include/uapi/linux/swab.h
include/xen/page.h
kernel/bpf/inode.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/sched/Makefile
kernel/sched/core.c
kernel/sched/cpufreq.c
kernel/sched/cpufreq_schedutil.c [new file with mode: 0644]
kernel/sched/sched.h
kernel/trace/power-traces.c
kernel/trace/trace_events.c
lib/stackdepot.c
mm/compaction.c
mm/huge_memory.c
mm/memory.c
mm/page-writeback.c
mm/page_alloc.c
mm/zswap.c
net/batman-adv/bat_v.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/core/dev.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv6/ila/ila_lwt.c
net/l2tp/l2tp_core.c
net/mac80211/iface.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/sched/sch_netem.c
net/tipc/node.c
samples/bpf/trace_output_kern.c
scripts/mod/file2alias.c
security/integrity/ima/ima_policy.c

index c156a8b4d845cd3fbf29bbcb43213066dafd046b..08b80428f5837001940381417e0b1d0be1125b2c 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
index 30df832a6f2f513f4e23841dd0a0ad888cc13dba..87adfb227ca92facd1b3dfe75501b21e9a610a52 100644 (file)
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -32,6 +32,10 @@ Optional properties:
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
+- ports-implemented : Mask that indicates which ports that the HBA supports
+                     are available for software to use. Useful if PORTS_IMPL
+                     is not programmed by the BIOS, which is true with
+                     some embedded SOC's.
 
 Required properties when using sub-nodes:
 - #address-cells    : number of cells to encode an address
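
On the driver side, this new optional property amounts to a single u32 read that, when present, overrides whatever PORTS_IMPL value firmware left in the HBA. A sketch of the consuming code; the helper name is hypothetical, and exactly which hpriv field libahci stores the mask in is an assumption here:

#include <linux/of.h>
#include <linux/types.h>

/* Sketch: honour an optional "ports-implemented" DT override. If the
 * property is absent, *port_map is left untouched, so the PORTS_IMPL
 * value programmed by the BIOS/bootloader stays in effect. */
static void demo_ahci_port_map(struct device_node *np, u32 *port_map)
{
	of_property_read_u32(np, "ports-implemented", port_map);
}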
index 28a4781ab6d7b9d6a1ab553ed96857f0f509e250..0ae06491b4302209607340a5f4b250c7ca1fe100 100644 (file)
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -45,13 +45,13 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 - mac-address          : See ethernet.txt file in the same directory
-- phy_id               : Specifies slave phy id
+- phy_id               : Specifies slave phy id (deprecated, use phy-handle)
 - phy-handle           : See ethernet.txt file in the same directory
 
 Slave sub-nodes:
 - fixed-link           : See fixed-link.txt file in the same directory
-                         Either the property phy_id, or the sub-node
-                         fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
index 0b3de80ec8f69c1aef9dfaa59fb86c14395ba43e..52292b28b291241935fbe6664958b1d059b59b79 100644 (file)
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1661,6 +1661,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                hwp_only
                        Only load intel_pstate on systems which support
                        hardware P state control (HWP) if available.
+               support_acpi_ppc
+                       Enforce ACPI _PPC performance limits. If the Fixed ACPI
+                       Description Table, specifies preferred power management
+                       profile as "Enterprise Server" or "Performance Server",
+                       then this feature is turned on by default.
 
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
index 3f24df8c6e6557cf1cf0d9e25857dead9cd0d94d..50b8589d12fd167371c87ca457708146bb765ae3 100644 (file)
--- a/Documentation/networking/altera_tse.txt
+++ b/Documentation/networking/altera_tse.txt
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
 using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
 platform bus to obtain component resources. The designs used to test this
 driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
 cases are simple communications between an embedded system and an external peer
 for status and simple configuration of the embedded system.
 
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.
 
 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
index cf996394e466b708d5ca02757143205d519ceb5b..14422f8fcdc474f5f32cde7868803cf67fbb0805 100644 (file)
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -8,7 +8,7 @@ Initial Release:
        This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.
 
 
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
 as well.
 
 4.2 L3 mode:
-       In this mode TX processing upto L3 happens on the stack instance attached
+       In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
        (a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
        (b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
        (c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
 
index f4be85e9600578e7411f1baa1ab37041a677fe4a..2c4e3354e12891e755de5986b6f9fa82eff0657b 100644 (file)
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -67,12 +67,12 @@ The two basic thread commands are:
  * add_device DEVICE@NAME -- adds a single device
  * rem_device_all         -- remove all associated devices
 
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.
 
 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
  device@something
 
 The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
 
 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
 
 Usage example and help:
  ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
index d52aa10cfe911c88b47927c25cfd8ef596c65986..5da679c573d2326c88cdcd6eccde9805f8dd06dc 100644 (file)
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.
 
 [1] Packets in the forwarded state do not flow through the device, so those
index d7aac9dedeb4266d970e8cc910726a027305a0b3..8d88e0f2ec493be2e519dbaec3e7995f5027a325 100644 (file)
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
 from Jamal <hadi@cyberus.ca>.
 
 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
 
 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.
 
 -JHS
index 7e9da3aa9851aff6389fa5299e39d8075321d527..945b395f4bb1b0e13c5bb655a36d8f763efa5a0c 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -872,9 +872,9 @@ F:  drivers/perf/arm_pmu.c
 F:     include/linux/perf/arm_pmu.h
 
 ARM PORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/
 
@@ -886,35 +886,35 @@ F:        arch/arm/plat-*/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM PRIMECELL AACI PL041 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     sound/arm/aaci.*
 
 ARM PRIMECELL CLCD PL110 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/input/serio/ambakmi.*
 F:     include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/mmc/host/mmci.*
 F:     include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/tty/serial/amba-pl01*.c
 F:     include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/amba/
 F:     include/linux/amba/bus.h
@@ -1036,7 +1036,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
 ARM/CLKDEV SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/include/asm/clkdev.h
@@ -1093,9 +1093,9 @@ F:        arch/arm/boot/dts/cx92755*
 N:     digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/mach-ebsa110/
 F:     drivers/net/ethernet/amd/am79c961a.*
@@ -1124,9 +1124,9 @@ T:        git git://git.berlios.de/gemini-board
 F:     arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/dec21285.h
 F:     arch/arm/mach-footbridge/
@@ -1322,6 +1322,7 @@ F:        drivers/rtc/rtc-armada38x.c
 F:     arch/arm/boot/dts/armada*
 F:     arch/arm/boot/dts/kirkwood*
 F:     arch/arm64/boot/dts/marvell/armada*
+F:     drivers/cpufreq/mvebu-cpufreq.c
 
 
 ARM/Marvell Berlin SoC support
@@ -1457,7 +1458,7 @@ S:        Maintained
 ARM/PT DIGITAL BOARD PORT
 M:     Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 
 ARM/QUALCOMM SUPPORT
@@ -1493,9 +1494,9 @@ S:        Supported
 F:     arch/arm64/boot/dts/renesas/
 
 ARM/RISCPC ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/entry-macro-iomd.S
 F:     arch/arm/include/asm/hardware/ioc.h
@@ -1773,9 +1774,9 @@ F:        drivers/clk/versatile/clk-vexpress-osc.c
 F:     drivers/clocksource/versatile.c
 
 ARM/VFP SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/vfp/
 
@@ -2921,7 +2922,7 @@ F:        mm/cleancache.c
 F:     include/linux/cleancache.h
 
 CLK API
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-clk@vger.kernel.org
 S:     Maintained
 F:     include/linux/clk.h
@@ -3354,9 +3355,9 @@ S:        Supported
 F:     drivers/net/ethernet/stmicro/stmmac/
 
 CYBERPRO FB DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     drivers/video/fbdev/cyber2000fb.*
 
@@ -3890,7 +3891,7 @@ F:        Documentation/devicetree/bindings/display/st,stih4xx.txt
 
 DRM DRIVERS FOR VIVANTE GPU IP
 M:     Lucas Stach <l.stach@pengutronix.de>
-R:     Russell King <linux+etnaviv@arm.linux.org.uk>
+R:     Russell King <linux+etnaviv@armlinux.org.uk>
 R:     Christian Gmeiner <christian.gmeiner@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
@@ -4232,8 +4233,8 @@ F:        Documentation/efi-stub.txt
 F:     arch/ia64/kernel/efi.c
 F:     arch/x86/boot/compressed/eboot.[ch]
 F:     arch/x86/include/asm/efi.h
-F:     arch/x86/platform/efi/*
-F:     drivers/firmware/efi/*
+F:     arch/x86/platform/efi/
+F:     drivers/firmware/efi/
 F:     include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
@@ -4753,7 +4754,7 @@ F:        drivers/platform/x86/fujitsu-tablet.c
 
 FUSE: FILESYSTEM IN USERSPACE
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     fuse-devel@lists.sourceforge.net
+L:     linux-fsdevel@vger.kernel.org
 W:     http://fuse.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S:     Maintained
@@ -4912,7 +4913,7 @@ F:        net/ipv4/gre_offload.c
 F:     include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
-M:     Kristoffer Glembo <kristoffer@gaisler.com>
+M:     Andreas Larsson <andreas@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/aeroflex/
@@ -6914,7 +6915,7 @@ L:        linux-man@vger.kernel.org
 S:     Maintained
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Maintained
 F:     drivers/gpu/drm/armada/
 
@@ -7914,7 +7915,7 @@ S:        Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Supported
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
@@ -7987,7 +7988,7 @@ F:        arch/arm/*omap*/*pm*
 F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M:     Rajendra Nayak <rnayak@ti.com>
+M:     Rajendra Nayak <rnayak@codeaurora.org>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
@@ -10023,7 +10024,8 @@ F:      drivers/infiniband/hw/ocrdma/
 
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:     Shradha Shah <sshah@solarflare.com>
+M:     Edward Cree <ecree@solarflare.com>
+M:     Bert Kenward <bkenward@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
index 7466de60ddc7ab98c66c7a28f3cd4a502f465411..acf6155421cc244913b71c6d3b95cd17690e64ff 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Charred Weasel
 
 # *DOCUMENTATION*
index ec4791ea691196bf7137b99ead768b3cc030a056..a8767430df7d6012463c8a34366d251496d15364 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -58,6 +58,9 @@ config GENERIC_CSUM
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+       def_bool y
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
 
@@ -347,6 +350,15 @@ config ARC_HUGEPAGE_16M
 
 endchoice
 
+config NODES_SHIFT
+       int "Maximum NUMA Nodes (as a power of 2)"
+       default "1" if !DISCONTIGMEM
+       default "2" if DISCONTIGMEM
+       depends on NEED_MULTIPLE_NODES
+       ---help---
+         Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
+         zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -455,6 +467,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
+       select DISCONTIGMEM
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
index 17f85c9c73cfe830a40280ad79bf9decb1b65a17..c22b181e8206f3162c4e0e19214f8b303f13c576 100644 (file)
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                                  unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
 #define ioremap_wc(phy, sz)            ioremap(phy, sz)
 #define ioremap_wt(phy, sz)            ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)          ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)          ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)       ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)       ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page)             (page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb()              rmb()
-#define __iowmb()              wmb()
-#else
-#define __iormb()              do { } while (0)
-#define __iowmb()              do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
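
With the barrier definitions hoisted above them, the new ioread*be()/iowrite*be() macros expand with the ARCv2 ordering barriers included. A usage sketch against a hypothetical peripheral with big-endian registers (the register names and offsets are invented for illustration):

#include <linux/io.h>

#define DEMO_CTRL	0x00	/* hypothetical register offsets */
#define DEMO_STATUS	0x04

static u32 demo_read_status(void __iomem *regs)
{
	/* __raw readl plus byteswap, then __iormb() on ARCv2 so the load
	 * is ordered before dependent accesses */
	return ioread32be(regs + DEMO_STATUS);
}

static void demo_start(void __iomem *regs)
{
	/* __iowmb() first on ARCv2, so earlier stores are visible to the
	 * device before the doorbell write */
	iowrite32be(0x1, regs + DEMO_CTRL);
}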
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
new file mode 100644 (file)
index 0000000..8e97136
--- /dev/null
+++ b/arch/arc/include/asm/mmzone.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+       int is_end_low = 1;
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+               is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+       /*
+        * node 0: lowmem:             0x8000_0000   to 0xFFFF_FFFF
+        * node 1: HIGHMEM w/o  PAE40: 0x0           to 0x7FFF_FFFF
+        *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+        */
+       if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+               return 0;
+
+       return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+       int nid = pfn_to_nid(pfn);
+
+       return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM  */
+
+#endif
index 36da89e2c853ef0551bfcee0f145454e23a81dd6..0d53854884d047557a981fece15936715a4cd950 100644 (file)
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -72,11 +72,20 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET                virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)         (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +94,10 @@ typedef pte_t * pgtable_t;
  * virt here means link-address/program-address as embedded in object code.
  * And for ARC, link-addr = physical address
  */
-#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __pa(vaddr)  ((unsigned long)(vaddr))
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr)    \
-       (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
index 7d6c93e63adf3af60a0642b03b3ae798061b1e65..10d4b8b8e5450e83468f96f16d9ad18b5c62d3b0 100644 (file)
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)                 (pmd_val(x))
 #define pmd_clear(xp)                  do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte)  \
-       (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)           virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-                                pgprot_val(prot)))
-#define __pte_index(addr)      (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
index 5487d0b974005621a8cfaf119bdbf1f687d9ff7b..8be930394750a7cf7039a9dcc85cf6ee1994f4e6 100644 (file)
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -30,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -109,13 +114,11 @@ void __init setup_arch_memory(void)
        /* Last usable page of low mem */
        max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-       min_high_pfn = PFN_DOWN(high_mem_start);
-       max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+       /* pfn_valid() uses this */
+       max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-       max_mapnr = max_pfn - min_low_pfn;
-
        /*------------- bootmem allocator setup -----------------------*/
 
        /*
@@ -129,7 +132,7 @@ void __init setup_arch_memory(void)
         * the crash
         */
 
-       memblock_add(low_mem_start, low_mem_sz);
+       memblock_add_node(low_mem_start, low_mem_sz, 0);
        memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -149,13 +152,6 @@ void __init setup_arch_memory(void)
        zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
        zones_holes[ZONE_NORMAL] = 0;
 
-#ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
-
-       /* This handles the peripheral address space hole */
-       zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
-#endif
-
        /*
         * We can't use the helper free_area_init(zones[]) because it uses
         * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
@@ -168,6 +164,34 @@ void __init setup_arch_memory(void)
                            zones_holes);       /* holes */
 
 #ifdef CONFIG_HIGHMEM
+       /*
+        * Populate a new node with highmem
+        *
+        * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+        * than addresses in normal ala low memory (0x8000_0000 based).
+        * Even with PAE, the huge peripheral space hole would waste a lot of
+        * mem with single mem_map[]. This warrants a mem_map per region design.
+        * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
+        *
+        * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
+        * populated with normal memory zone while node 1 only has highmem
+        */
+       node_set_online(1);
+
+       min_high_pfn = PFN_DOWN(high_mem_start);
+       max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+       zones_size[ZONE_NORMAL] = 0;
+       zones_holes[ZONE_NORMAL] = 0;
+
+       zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
+       zones_holes[ZONE_HIGHMEM] = 0;
+
+       free_area_init_node(1,                  /* node-id */
+                           zones_size,         /* num pages per zone */
+                           min_high_pfn,       /* first pfn of node */
+                           zones_holes);       /* holes */
+
        high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
        kmap_init();
 #endif
@@ -185,7 +209,7 @@ void __init mem_init(void)
        unsigned long tmp;
 
        reset_all_zones_managed_pages();
-       for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+       for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
 #endif
 
index b3c26a96a7262bdc2356060f900cc9b0c8bbd8eb..d9e2d9c6e999e4aa6f3a4c8f582fe9caf74953f4 100644 (file)
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
        regulator-name = "V28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due to battery cover sensor */
 };
 
        regulator-name = "VCSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux3 {
        regulator-name = "VMMC2_30";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux4 {
        regulator-name = "VCAM_ANA_28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc1 {
        regulator-name = "VMMC1";
        regulator-min-microvolt = <1850000>;
        regulator-max-microvolt = <3150000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc2 {
        regulator-name = "V28_A";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due VIO leak to AIC34 VDDs */
 };
 
        regulator-name = "VPLL";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VSDI_CSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VMMC2_IO_18";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vio {
index 387dc31822fe9c8b2729d28da389d410ffb55fe7..96f8ce7bd2afc0aa5507d81cf98e59b4eddd61ec 100644 (file)
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -46,7 +46,7 @@
                               0x480bd800 0x017c>;
                        interrupts = <24>;
                        iommus = <&mmu_isp>;
-                       syscon = <&scm_conf 0xdc>;
+                       syscon = <&scm_conf 0x6c>;
                        ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>;
                        #clock-cells = <1>;
                        ports {
index 902657d6713b073df82d33158d3d1cd0fac5ef16..914bf4c47404f641f823a122c26ff93127f8d25e 100644 (file)
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index ecc591dc0778ddbf77222d417884387511d31f0d..4d87d9c6c86d85ec32ae1bc4444fdd06ed5b317e 100644 (file)
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index 38805ebbe2ba49f5dcd2ee92d80cde1e324cbb56..120b6b80cd39eacc2c874768e89eb14ab147b531 100644 (file)
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
                        omap5_pmx_wkup: pinmux@c840 {
                                compatible = "ti,omap5-padconf",
                                             "pinctrl-single";
-                               reg = <0xc840 0x0038>;
+                               reg = <0xc840 0x003c>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
index 65d0e8d9825947c68b06e53b162d10116df084c0..04f541bffbdd52d677c77cc717d2d82c63f3c549 100644 (file)
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
                };
 
                sata0: sata@29000000 {
-                       compatible              = "generic-ahci";
+                       compatible              = "qcom,apq8064-ahci", "generic-ahci";
                        status                  = "disabled";
                        reg                     = <0x29000000 0x180>;
                        interrupts              = <GIC_SPI 209 IRQ_TYPE_NONE>;
 
                        phys                    = <&sata_phy0>;
                        phy-names               = "sata-phy";
+                       ports-implemented       = <0x1>;
                };
 
                /* Temporary fixed regulator */
index 9d2b7e2f5975ed3dae7d9d107a5a0ed5c80369e9..346a49d805a7becd411cda921c7508edb35a3aa8 100644 (file)
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index 3848259bebf85786d39d4212d74f2e1646b98a44..baefe1d515174d77effa6195b4adde27ef35085f 100644 (file)
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -36,7 +36,7 @@ struct cpuidle_ops {
 
 struct of_cpuidle_method {
        const char *method;
-       struct cpuidle_ops *ops;
+       const struct cpuidle_ops *ops;
 };
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)                 \
index fc8ba1663601e0743a05b7cda52703df9f9bc07a..99d9f630d6b6838954bde8ddbdd90b0b5ace17b8 100644 (file)
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -84,6 +84,7 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_CPU_CP15_MMU
 static inline unsigned int get_domain(void)
 {
        unsigned int domain;
@@ -103,6 +104,16 @@ static inline void set_domain(unsigned val)
          : : "r" (val) : "memory");
        isb();
 }
+#else
+static inline unsigned int get_domain(void)
+{
+       return 0;
+}
+
+static inline void set_domain(unsigned val)
+{
+}
+#endif
 
 #ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)                                        \
index 703926e7007b4e0000b689006722780a02588e04..a44b268e12e1a2acbc1990afc8b5c2c65512256f 100644 (file)
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -70,7 +70,7 @@ int arm_cpuidle_suspend(int index)
  *
  * Returns a struct cpuidle_ops pointer, NULL if not found.
  */
-static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 {
        struct of_cpuidle_method *m = __cpuidle_method_of_table;
 
@@ -88,7 +88,7 @@ static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
  *
  * Get the method name defined in the 'enable-method' property, retrieve the
  * associated cpuidle_ops and do a struct copy. This copy is needed because all
- * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * cpuidle_ops are tagged __initconst and will be unloaded after the init
  * process.
  *
  * Return 0 on sucess, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
@@ -97,7 +97,7 @@ static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
 {
        const char *enable_method;
-       struct cpuidle_ops *ops;
+       const struct cpuidle_ops *ops;
 
        enable_method = of_get_property(dn, "enable-method", NULL);
        if (!enable_method)
index 9b8c5a1134347aea35b7c60850d2b671efc9d768..fb1a69eb49c1a842fc604dc2b64f905e30a39aac 100644 (file)
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -236,7 +236,7 @@ ENTRY(__setup_mpu)
        mov     r0, #CONFIG_VECTORS_BASE        @ Cover from VECTORS_BASE
        ldr     r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
        /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
-       mov     r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+       mov     r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
 
        setup_region r0, r5, r6, MPU_DATA_SIDE  @ VECTORS_BASE, PL0 NA, enabled
        beq     3f                              @ Memory-map not unified
index 58dbd5c439df45bc10497954db0e61b433646cd6..d6d4191e68f23cd1f80d8faf95fa1405d156b30a 100644 (file)
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
        kvm_pfn_t pfn = *pfnp;
        gfn_t gfn = *ipap >> PAGE_SHIFT;
 
-       if (PageTransCompound(pfn_to_page(pfn))) {
+       if (PageTransCompoundMap(pfn_to_page(pfn))) {
                unsigned long mask;
                /*
                 * The address we faulted on is backed by a transparent huge
index 25d73870cccad498e98eab1c4a43666a36fbaa9c..ac181c6797ee5784c2f64d80ea1b1f4b2d0fc3b1 100644 (file)
--- a/arch/arm/mach-berlin/berlin.c
+++ b/arch/arm/mach-berlin/berlin.c
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 
-static void __init berlin_init_late(void)
-{
-       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char * const berlin_dt_compat[] = {
        "marvell,berlin",
        NULL,
@@ -30,7 +25,6 @@ static const char * const berlin_dt_compat[] = {
 
 DT_MACHINE_START(BERLIN_DT, "Marvell Berlin")
        .dt_compat      = berlin_dt_compat,
-       .init_late      = berlin_init_late,
        /*
         * with DT probing for L2CCs, berlin_init_machine can be removed.
         * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
index d97c588550ad467775dfe8b05487eb03d540c2f4..bc4e63fa9808932b451931d13906d69dbe2bd3ee 100644 (file)
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -121,6 +121,11 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context)
        const char *partnum = NULL;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Factory Config not available without CONFIG_NVMEM\n");
+               goto bad_config;
+       }
+
        ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
                                &factory_config);
        if (ret != sizeof(struct factory_config)) {
index f55ef2ef2f92eb88c9c7485e5bfda537af126d31..742133b7266a64d7ca551a475f64e82ff2fa3841 100644 (file)
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -33,6 +33,11 @@ void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
        char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
        off_t offset = (off_t)context;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n");
+               return;
+       }
+
        /* Read MAC addr from EEPROM */
        if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
                pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
index bbf51a46f772d3a6d2b8a0e4f1546f783ec97022..4d3b056fd786e3a087d0c5837ea2ec2cbfc67cf7 100644 (file)
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -213,33 +213,6 @@ static void __init exynos_init_irq(void)
        exynos_map_pmu();
 }
 
-static const struct of_device_id exynos_cpufreq_matches[] = {
-       { .compatible = "samsung,exynos3250", .data = "cpufreq-dt" },
-       { .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
-       { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
-       { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
-       { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
-#ifndef CONFIG_BL_SWITCHER
-       { .compatible = "samsung,exynos5420", .data = "cpufreq-dt" },
-       { .compatible = "samsung,exynos5800", .data = "cpufreq-dt" },
-#endif
-       { /* sentinel */ }
-};
-
-static void __init exynos_cpufreq_init(void)
-{
-       struct device_node *root = of_find_node_by_path("/");
-       const struct of_device_id *match;
-
-       match = of_match_node(exynos_cpufreq_matches, root);
-       if (!match) {
-               platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
-               return;
-       }
-
-       platform_device_register_simple(match->data, -1, NULL, 0);
-}
-
 static void __init exynos_dt_machine_init(void)
 {
        /*
@@ -262,8 +235,6 @@ static void __init exynos_dt_machine_init(void)
            of_machine_is_compatible("samsung,exynos5250"))
                platform_device_register(&exynos_cpuidle);
 
-       exynos_cpufreq_init();
-
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 7c21760f590ffd0d4cd47fcafcbcaffe64a85952..875a2bab64f67b1a7dda20e216a264f01679f8a9 100644 (file)
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
                        if (IS_ERR(pd->clk[i]))
                                break;
 
-                       if (IS_ERR(pd->clk[i]))
+                       if (IS_ERR(pd->pclk[i]))
                                continue; /* Skip on first power up */
                        if (clk_set_parent(pd->clk[i], pd->pclk[i]))
                                pr_err("%s: error setting parent to clock%d\n",
index bd42d1bd10aff1a1f0f28db8ea4fe3fac7ee4adc..530a728c2acc2d9e5d5e7398109de02fc58805ca 100644 (file)
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
 #include "common.h"
 #include "mx27.h"
 
-static void __init imx27_dt_init(void)
-{
-       struct platform_device_info devinfo = { .name = "cpufreq-dt", };
-
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
-       platform_device_register_full(&devinfo);
-}
-
 static const char * const imx27_dt_board_compat[] __initconst = {
        "fsl,imx27",
        NULL
@@ -36,6 +27,5 @@ DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
        .map_io         = mx27_map_io,
        .init_early     = imx27_init_early,
        .init_irq       = mx27_init_irq,
-       .init_machine   = imx27_dt_init,
        .dt_compat      = imx27_dt_board_compat,
 MACHINE_END
index 6883fbaf9484b2da00324d42ce2ada3e70cefc66..10a82a4f1e5889894c04519cf257a7059e282191 100644 (file)
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -50,13 +50,10 @@ static void __init imx51_ipu_mipi_setup(void)
 
 static void __init imx51_dt_init(void)
 {
-       struct platform_device_info devinfo = { .name = "cpufreq-dt", };
-
        imx51_ipu_mipi_setup();
        imx_src_init();
 
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-       platform_device_register_full(&devinfo);
 }
 
 static void __init imx51_init_late(void)
index 86316a979297fbafdafb815e4a140a24540d4bc8..18b5c5c136db94aeb0456e3fd248f7943e7f85c2 100644 (file)
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -40,8 +40,6 @@ static void __init imx53_dt_init(void)
 static void __init imx53_init_late(void)
 {
        imx53_pm_init();
-
-       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 }
 
 static const char * const imx53_dt_board_compat[] __initconst = {
index 5a27f20c9a82b84fbeb39a2dd402348c7e0c6862..b450f525a670961b79cd0b3d28271a238dba70a1 100644 (file)
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -105,11 +105,6 @@ static void __init imx7d_init_irq(void)
        irqchip_init();
 }
 
-static void __init imx7d_init_late(void)
-{
-       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char *const imx7d_dt_compat[] __initconst = {
        "fsl,imx7d",
        NULL,
@@ -117,7 +112,6 @@ static const char *const imx7d_dt_compat[] __initconst = {
 
 DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)")
        .init_irq       = imx7d_init_irq,
-       .init_late      = imx7d_init_late,
        .init_machine   = imx7d_init_machine,
        .dt_compat      = imx7d_dt_compat,
 MACHINE_END
index ed8fda4cd055848e871bb0f592b23735d4c843aa..b44442338e4e4730630e92b1a372d1b6a175bb8a 100644 (file)
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -20,7 +20,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu_pm.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -29,7 +28,6 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/pm_opp.h>
 #include <linux/resource.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -608,86 +606,3 @@ int mvebu_pmsu_dfs_request(int cpu)
 
        return 0;
 }
-
-struct cpufreq_dt_platform_data cpufreq_dt_pd = {
-       .independent_clocks = true,
-};
-
-static int __init armada_xp_pmsu_cpufreq_init(void)
-{
-       struct device_node *np;
-       struct resource res;
-       int ret, cpu;
-
-       if (!of_machine_is_compatible("marvell,armadaxp"))
-               return 0;
-
-       /*
-        * In order to have proper cpufreq handling, we need to ensure
-        * that the Device Tree description of the CPU clock includes
-        * the definition of the PMU DFS registers. If not, we do not
-        * register the clock notifier and the cpufreq driver. This
-        * piece of code is only for compatibility with old Device
-        * Trees.
-        */
-       np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
-       if (!np)
-               return 0;
-
-       ret = of_address_to_resource(np, 1, &res);
-       if (ret) {
-               pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
-               of_node_put(np);
-               return 0;
-       }
-
-       of_node_put(np);
-
-       /*
-        * For each CPU, this loop registers the operating points
-        * supported (which are the nominal CPU frequency and half of
-        * it), and registers the clock notifier that will take care
-        * of doing the PMSU part of a frequency transition.
-        */
-       for_each_possible_cpu(cpu) {
-               struct device *cpu_dev;
-               struct clk *clk;
-               int ret;
-
-               cpu_dev = get_cpu_device(cpu);
-               if (!cpu_dev) {
-                       pr_err("Cannot get CPU %d\n", cpu);
-                       continue;
-               }
-
-               clk = clk_get(cpu_dev, 0);
-               if (IS_ERR(clk)) {
-                       pr_err("Cannot get clock for CPU %d\n", cpu);
-                       return PTR_ERR(clk);
-               }
-
-               /*
-                * In case of a failure of dev_pm_opp_add(), we don't
-                * bother with cleaning up the registered OPP (there's
-                * no function to do so), and simply cancel the
-                * registration of the cpufreq device.
-                */
-               ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
-               if (ret) {
-                       clk_put(clk);
-                       return ret;
-               }
-
-               ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
-               if (ret) {
-                       clk_put(clk);
-                       return ret;
-               }
-       }
-
-       platform_device_register_data(NULL, "cpufreq-dt", -1,
-                                     &cpufreq_dt_pd, sizeof(cpufreq_dt_pd));
-       return 0;
-}
-
-device_initcall(armada_xp_pmsu_cpufreq_init);
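
Note: nothing above reduces Armada XP functionality. The deleted initcall registered two operating points per CPU (the nominal clock rate and half of it, matching what the PMSU's dynamic frequency scaling can switch between) and then spawned "cpufreq-dt" with independent_clocks set. In keeping with the consolidation seen in the other machine files, this logic appears to move into a dedicated driver under drivers/cpufreq rather than being dropped outright.
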
index 58920bc8807bce963536296bc7914db421c2b6aa..2f7b11da7d5d9ce23e0399aaf240d4ef73edacf1 100644 (file)
@@ -277,13 +277,10 @@ static void __init omap4_init_voltages(void)
 
 static inline void omap_init_cpufreq(void)
 {
-       struct platform_device_info devinfo = { };
+       struct platform_device_info devinfo = { .name = "omap-cpufreq" };
 
        if (!of_have_populated_dt())
-               devinfo.name = "omap-cpufreq";
-       else
-               devinfo.name = "cpufreq-dt";
-       platform_device_register_full(&devinfo);
+               platform_device_register_full(&devinfo);
 }
 
 static int __init omap2_common_pm_init(void)
index 3f07cc5dfe5fc594b75361a62417e76ff6cfc550..beb71da5d9c8feb8a482b1179a61d61499212318 100644 (file)
@@ -74,7 +74,6 @@ static void __init rockchip_dt_init(void)
 {
        rockchip_suspend_init();
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-       platform_device_register_simple("cpufreq-dt", 0, NULL, 0);
 }
 
 static const char * const rockchip_board_dt_compat[] = {
index a65c80ac9009d51f1e54fd0b07e58336d8241e02..c9ea0e6ff4f906dcc49bb410aa252c13692972a3 100644 (file)
@@ -38,7 +38,6 @@ smp-$(CONFIG_ARCH_EMEV2)      += smp-emev2.o headsmp-scu.o platsmp-scu.o
 
 # PM objects
 obj-$(CONFIG_SUSPEND)          += suspend.o
-obj-$(CONFIG_CPU_FREQ)         += cpufreq.o
 obj-$(CONFIG_PM_RCAR)          += pm-rcar.o
 obj-$(CONFIG_PM_RMOBILE)       += pm-rmobile.o
 obj-$(CONFIG_ARCH_RCAR_GEN2)   += pm-rcar-gen2.o
index 5464b7a75e3028a792e4718a2cc68b8dd7a1fc0b..3b562d87826d74100d861373c039df8f995cd6e0 100644 (file)
@@ -25,16 +25,9 @@ static inline int shmobile_suspend_init(void) { return 0; }
 static inline void shmobile_smp_apmu_suspend_init(void) { }
 #endif
 
-#ifdef CONFIG_CPU_FREQ
-int shmobile_cpufreq_init(void);
-#else
-static inline int shmobile_cpufreq_init(void) { return 0; }
-#endif
-
 static inline void __init shmobile_init_late(void)
 {
        shmobile_suspend_init();
-       shmobile_cpufreq_init();
 }
 
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/cpufreq.c b/arch/arm/mach-shmobile/cpufreq.c
deleted file mode 100644 (file)
index 634d701..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * CPUFreq support code for SH-Mobile ARM
- *
- *  Copyright (C) 2014 Gaku Inami
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/platform_device.h>
-
-#include "common.h"
-
-int __init shmobile_cpufreq_init(void)
-{
-       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-       return 0;
-}
index 5d94b7a2fb108dc1bc1c5fcdd4edfa80ab2c0c7f..c160fa3007e943be451b82318149a69dce4d7e54 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 
        .arch   armv7-a
+       .arm
 
 ENTRY(secondary_trampoline)
        /* CPU1 will always fetch from 0x0 when it is brought out of reset.
index 3c156190a1d44223027b4c9a37ab67550219854b..95dca8c2c9edcd5a72276dd05ab8572c7dd7bd9e 100644 (file)
 
 #include <asm/mach/arch.h>
 
-static void __init sunxi_dt_cpufreq_init(void)
-{
-       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char * const sunxi_board_dt_compat[] = {
        "allwinner,sun4i-a10",
        "allwinner,sun5i-a10s",
@@ -32,7 +27,6 @@ static const char * const sunxi_board_dt_compat[] = {
 
 DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
        .dt_compat      = sunxi_board_dt_compat,
-       .init_late      = sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun6i_board_dt_compat[] = {
@@ -53,7 +47,6 @@ static void __init sun6i_timer_init(void)
 DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
        .init_time      = sun6i_timer_init,
        .dt_compat      = sun6i_board_dt_compat,
-       .init_late      = sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun7i_board_dt_compat[] = {
@@ -63,7 +56,6 @@ static const char * const sun7i_board_dt_compat[] = {
 
 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
        .dt_compat      = sun7i_board_dt_compat,
-       .init_late      = sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun8i_board_dt_compat[] = {
@@ -77,7 +69,6 @@ static const char * const sun8i_board_dt_compat[] = {
 DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
        .init_time      = sun6i_timer_init,
        .dt_compat      = sun8i_board_dt_compat,
-       .init_late      = sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun9i_board_dt_compat[] = {
index 860ffb663f02b1c08c15c55f1c3fbf7638e610a8..da876d28ccbc53c1e82f042bcf5f5e730ed2b976 100644 (file)
@@ -110,7 +110,6 @@ static void __init zynq_init_late(void)
  */
 static void __init zynq_init_machine(void)
 {
-       struct platform_device_info devinfo = { .name = "cpufreq-dt", };
        struct soc_device_attribute *soc_dev_attr;
        struct soc_device *soc_dev;
        struct device *parent = NULL;
@@ -145,7 +144,6 @@ out:
        of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
 
        platform_device_register(&zynq_cpuidle_device);
-       platform_device_register_full(&devinfo);
 }
 
 static void __init zynq_timer_init(void)
index 1dd10936d68d0422b328128fbb6a234d77adee1d..d5805e4bf2fc71a0db019c198daedccb847a8376 100644 (file)
@@ -87,7 +87,6 @@ static unsigned long irbar_read(void)
 /* MPU initialisation functions */
 void __init sanity_check_meminfo_mpu(void)
 {
-       int i;
        phys_addr_t phys_offset = PHYS_OFFSET;
        phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
        struct memblock_region *reg;
@@ -110,11 +109,13 @@ void __init sanity_check_meminfo_mpu(void)
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
-                        * all blocks afterwards
+                        * all blocks afterwards in one go (we can't remove
+                        * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
-                                 &mem_start, &reg->base);
-                       memblock_remove(reg->base, reg->size);
+                                 &mem_end, &reg->base);
+                       memblock_remove(reg->base, 0 - reg->base);
+                       break;
                }
        }
 
@@ -144,7 +145,7 @@ void __init sanity_check_meminfo_mpu(void)
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
                                &specified_mem_size, &aligned_region_size);
                memblock_remove(mem_start + aligned_region_size,
-                               specified_mem_size - aligned_round_size);
+                               specified_mem_size - aligned_region_size);
 
                mem_end = mem_start + aligned_region_size;
        }
@@ -261,7 +262,7 @@ void __init mpu_setup(void)
                return;
 
        region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
-                                       ilog2(meminfo.bank[0].size),
+                                       ilog2(memblock.memory.regions[0].size),
                                        MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
        if (region_err) {
                panic("MPU region initialization failure! %d", region_err);
@@ -285,7 +286,7 @@ void __init arm_mm_memblock_reserve(void)
         * on some architectures the DRAM at address 0 holds the exception
         * vectors; if that page were handed out, alloc_page would return a
         * valid page whose address is "0", which callers mistake for failure.
         */
-       memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+       memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE);
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M. So nothing needs to be
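
A note on the memblock_remove(reg->base, 0 - reg->base) line introduced above: since the size argument is unsigned, 0 - base wraps around to exactly the distance from base to the top of the address space, so a single call trims base and everything beyond it (the old per-region removal was unsafe because memblock auto-merges regions while the loop is still iterating over them). A worked example, assuming 32-bit physical addresses:

        /* base = 0x90000000
         * 0 - 0x90000000 == 0x70000000 bytes
         * => removes [0x90000000, 0xFFFFFFFF], i.e. everything from
         *    'base' upwards; the new 'break' then exits the loop before
         *    the modified region list can be walked again.
         */
        memblock_remove(reg->base, 0 - reg->base);
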
index a7315ebe3883f276456280de64d31537099cd292..706d2426024f7f0665c47dc37b92f566cb87a726 100644 (file)
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <0>;
-               status = "disabled";
        };
 
        soc {
index c976ebfe2269db83e7efcc803bcd14e390695e3b..57b4836b7ecd898e10197aa0d473ea6107b9fb42 100644 (file)
@@ -344,7 +344,7 @@ tracesys_next:
 #endif
 
        cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
-       comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
+       comiclr,>>      __NR_Linux_syscalls, %r20, %r0
        b,n     .Ltracesys_nosys
 
        LDREGX  %r20(%r19), %r19
index e4396a7d0f7cf5627a92ea8c07756aba6bc52c7a..4afe66aa1400d5d7e3783e696a0a1de12d3a316c 100644 (file)
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
            "andc       %1,%1,%2\n\t"
            "popcntd    %0,%1"
                : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
-               : "r" (bits));
+               : "b" (bits));
 
        return leading_zero_bits;
 }
index fb23fd6b186a1b0ffeeace7cca9e9a5a136fbfa6..c74d3701ad6830fe88338c185be374f7a6730f07 100644 (file)
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
index 04920ab8e292b75a99986b1a2d10ac6273e2db94..3583d676a9161f8197e826553162f011f390c1cd 100644 (file)
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
index 56f933816144d47bf96506d09d3249445bc0fe32..1d8321c827a8821bb4e9f4989eb883cd761370db 100644 (file)
@@ -48,6 +48,7 @@
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
 #define SUN4V_CHIP_SPARC64X    0x8a
+#define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
 #ifndef __ASSEMBLY__
index b6de8b10a55b8b8f09eedb2c90d86906d5445686..36eee8132c22bac329e99fb7284211e11310ff26 100644 (file)
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
 #define __NR_copy_file_range   357
+#define __NR_preadv2           358
+#define __NR_pwritev2          359
 
-#define NR_syscalls            358
+#define NR_syscalls            360
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 4ee1ad420862d425cff03ba7aad8e395fcb75907..655628def68e6be60b7cd31bda369b2fbbfd6c6b 100644 (file)
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1           ! Next cacheline
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_dcpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
        mov             0x2, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_dcpe_tl1,.-do_dcpe_tl1
 
        .globl          do_icpe_tl1
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_icpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
        mov             0x3, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_icpe_tl1,.-do_icpe_tl1
        
        .type           dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@ __cheetah_log_error:
         cmp            %g2, 0x63
        be              c_cee
         nop
-       ba,pt           %xcc, c_deferred
+       ba,a,pt         %xcc, c_deferred
        .size           __cheetah_log_error,.-__cheetah_log_error
 
        /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
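
Note: this file and the sparc64 assembly files that follow all apply one mechanical transformation. An always-taken branch spelled "ba,pt %xcc, target" followed by a nop in its delay slot becomes "ba,a,pt %xcc, target": the ",a" (annul) suffix tells the CPU not to execute the delay-slot instruction of the branch, so the explicit nop can be deleted and each site shrinks by one instruction with no change in behavior.
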
index dfad8b1aea9fb042a40290c766a3bb7ede4e0480..493e023a468a919c61d77451e43e0a4a2e414bbe 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_SN:
+               sparc_cpu_type = "SPARC-SN";
+               sparc_fpu_type = "SPARC-SN integrated FPU";
+               sparc_pmu_type = "sparc-sn";
+               break;
+
        case SUN4V_CHIP_SPARC64X:
                sparc_cpu_type = "SPARC64-X";
                sparc_fpu_type = "SPARC64-X integrated FPU";
index e69ec0e3f15527705b3ce28fc89ff5c0341f03fb..45c820e1cba5d949ff936f15392ca3c0c8578a34 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
                break;
index a6864826a4bd963f0188bd1f5d64218cbdee1981..336d2750fe78c3d4d79175e3427e94859e0cca95 100644 (file)
@@ -100,8 +100,8 @@ do_fpdis:
        fmuld           %f0, %f2, %f26
        faddd           %f0, %f2, %f28
        fmuld           %f0, %f2, %f30
-       b,pt            %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 2:     andcc           %g5, FPRS_DU, %g0
        bne,pt          %icc, 3f
         fzero          %f32
@@ -144,8 +144,8 @@ do_fpdis:
        fmuld           %f32, %f34, %f58
        faddd           %f32, %f34, %f60
        fmuld           %f32, %f34, %f62
-       ba,pt           %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 3:     mov             SECONDARY_CONTEXT, %g3
        add             %g6, TI_FPREGS, %g1
 
@@ -197,8 +197,7 @@ fpdis_exit2:
 fp_other_bounce:
        call            do_fpother
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           fp_other_bounce,.-fp_other_bounce
 
        .align          32
index cd1f592cd3479f8c94c599bfc589087184d4d180..a076b4249e622a4ebfe47eab6c9bcd7cb98b36ce 100644 (file)
@@ -414,6 +414,8 @@ sun4v_chip_type:
        cmp     %g2, 'T'
        be,pt   %xcc, 70f
         cmp    %g2, 'M'
+       be,pt   %xcc, 70f
+        cmp    %g2, 'S'
        bne,pn  %xcc, 49f
         nop
 
@@ -433,6 +435,9 @@ sun4v_chip_type:
        cmp     %g2, '7'
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
+       cmp     %g2, 'N'
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
         nop
 
@@ -461,9 +466,8 @@ sun4v_chip_type:
        subcc   %g3, 1, %g3
        bne,pt  %xcc, 41b
        add     %g1, 1, %g1
-       mov     SUN4V_CHIP_SPARC64X, %g4
        ba,pt   %xcc, 5f
-       nop
+        mov    SUN4V_CHIP_SPARC64X, %g4
 
 49:
        mov     SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@ sun4u_init:
        stxa            %g0, [%g7] ASI_DMMU
        membar  #Sync
 
-       ba,pt           %xcc, sun4u_continue
-        nop
+       ba,a,pt         %xcc, sun4u_continue
 
 sun4v_init:
        /* Set ctx 0 */
@@ -560,14 +563,12 @@ sun4v_init:
        mov             SECONDARY_CONTEXT, %g7
        stxa            %g0, [%g7] ASI_MMU
        membar          #Sync
-       ba,pt           %xcc, niagara_tlb_fixup
-        nop
+       ba,a,pt         %xcc, niagara_tlb_fixup
 
 sun4u_continue:
        BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
-       ba,pt   %xcc, spitfire_tlb_fixup
-        nop
+       ba,a,pt %xcc, spitfire_tlb_fixup
 
 niagara_tlb_fixup:
        mov     3, %g2          /* Set TLB type to hypervisor. */
@@ -595,6 +596,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_SN
        be,pt   %xcc, niagara4_patch
         nop
 
@@ -639,8 +643,7 @@ niagara_patch:
        call    hypervisor_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 cheetah_tlb_fixup:
        mov     2, %g2          /* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@ cheetah_tlb_fixup:
        call    cheetah_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 spitfire_tlb_fixup:
        /* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@ setup_trap_table:
        call    %o1
         add    %sp, (2047 + 128), %o0
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
 1:     sethi   %hi(sparc64_ttable_tl0), %o0
        set     prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@ setup_trap_table:
 
        BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
        /* Disable STICK_INT interrupts. */
 1:
index 753b4f031bfb710b77cefa78b74ade61a9d8850c..34b4933900bf7665b30e791565795d5782351b60 100644 (file)
@@ -18,8 +18,7 @@ __do_privact:
 109:   or              %g7, %lo(109b), %g7
        call            do_privact
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __do_privact,.-__do_privact
 
        .type           do_mna,#function
@@ -46,8 +45,7 @@ do_mna:
        mov             %l5, %o2
        call            mem_address_unaligned
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_mna,.-do_mna
 
        .type           do_lddfmna,#function
@@ -65,8 +63,7 @@ do_lddfmna:
        mov             %l5, %o2
        call            handle_lddfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_lddfmna,.-do_lddfmna
 
        .type           do_stdfmna,#function
@@ -84,8 +81,7 @@ do_stdfmna:
        mov             %l5, %o2
        call            handle_stdfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_stdfmna,.-do_stdfmna
 
        .type           breakpoint_trap,#function
index badf0951d73c8f2d875aa5cbc093caef04616aeb..c2b202d763a16ae74d1d150ed6e7dd940341d7dd 100644 (file)
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
        }
 }
 
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+                                 void *stc, void *host_controller,
+                                 struct platform_device  *op,
+                                 int numa_node)
+{
+       sd->iommu = iommu;
+       sd->stc = stc;
+       sd->host_controller = host_controller;
+       sd->op = op;
+       sd->numa_node = numa_node;
+}
+
 static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct device_node *node,
                                         struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        if (!dev)
                return NULL;
 
+       op = of_find_device_by_node(node);
        sd = &dev->dev.archdata;
-       sd->iommu = pbm->iommu;
-       sd->stc = &pbm->stc;
-       sd->host_controller = pbm;
-       sd->op = op = of_find_device_by_node(node);
-       sd->numa_node = pbm->numa_node;
-
+       pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+                             pbm->numa_node);
        sd = &op->dev.archdata;
        sd->iommu = pbm->iommu;
        sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev)
        /* No special bus mastering setup handling */
 }
 
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+       struct pci_dev *pdev;
+
+       /* Arch-specific SR-IOV initialization: copy the dev_archdata
+        * from the PF to the VF.
+        */
+       if (dev->is_virtfn) {
+               struct dev_archdata *psd;
+
+               pdev = dev->physfn;
+               psd = &pdev->dev.archdata;
+               pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+                                     psd->stc, psd->host_controller, NULL,
+                                     psd->numa_node);
+       }
+       return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
 static int __init pcibios_init(void)
 {
        pci_dfl_cache_line_size = 64 >> 2;
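
Note: SR-IOV virtual functions have no OF device node of their own, which is presumably why the new pcibios_add_device() clones the IOMMU, streaming cache, host controller and NUMA fields from the parent physical function while passing NULL for the platform_device pointer.
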
index 26db95b54ee94c44537590a1b94a4870bd9227ce..599f1207eed2e469157e2475631d2537543b5794 100644 (file)
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
 
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
index c357e40ffd01526ca38a824c12ac077cdba135c6..4a73009f66a5727e43ad45dbe1a2f7bded8cd48f 100644 (file)
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
        ba,pt           %xcc, etraptl1
         rd             %pc, %g7
 
-       ba,pt           %xcc, 2f
-        nop
+       ba,a,pt         %xcc, 2f
 
 1:     ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
        mov             %l5, %o2
        call            spitfire_access_error
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_access_error,.-__spitfire_access_error
 
        /* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_data_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
 
        .type           __spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
        mov             %l5, %o2
        call            spitfire_data_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception,.-__spitfire_data_access_exception
 
        .type           __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_insn_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
 
        .type           __spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
        mov             %l5, %o2
        call            spitfire_insn_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
index 6c3dd6c52f8bd09135e81f1d56704602d10c4f4e..eac7f0db5c8c6269a913152a11941f66f5f3e8d0 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
index 12b524cfcfa0120caabdf7aa5b8606ecc3978c96..b0f17ff2ddba2daa75a8e8a646b5661dfe0d36e9 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
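
With the table entries above wired up, the new calls are reachable from userspace even before libc grows wrappers. A hedged sketch of a raw invocation (the helper name is ours; at the syscall boundary the kernel takes the file offset as separate low/high words, so a 64-bit caller passes the high word as 0):

        #define _GNU_SOURCE             /* for syscall() */
        #include <sys/uio.h>
        #include <unistd.h>

        #define NR_preadv2 358          /* __NR_preadv2 from the hunk above */

        static ssize_t raw_preadv2(int fd, const struct iovec *iov, int cnt,
                                   off_t off, int flags)
        {
                /* args: fd, iovec array, count, pos_low, pos_high, flags */
                return syscall(NR_preadv2, fd, iov, cnt, (long)off, 0L, flags);
        }
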
index b7f0f3f3a909b05b544da1a7b3d3e21d12bfa357..c731e8023d3e7c6333ae091f39ca76aedf332603 100644 (file)
@@ -11,8 +11,7 @@ utrap_trap:           /* %g3=handler,%g4=level */
        mov             %l4, %o1
         call           bad_trap
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
 
 invoke_utrap:
        sllx            %g3, 3, %g3
index cb5789c9f9613ed692733d50dcf0e2c39784b1f7..f6bb857254fcfa170155d4cd8dc8cb717c5bfb97 100644 (file)
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device(
        return NULL;
 }
 
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+       const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+       add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+       return 0;
+}
+
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
        struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev,
        return sprintf(buf, "%s\n", vdev->type);
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       const struct vio_dev *vdev = to_vio_dev(dev);
+
+       return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(devspec),
        __ATTR_RO(type),
+       __ATTR_RO(modalias),
        __ATTR_NULL
 };
 
 static struct bus_type vio_bus_type = {
        .name           = "vio",
        .dev_attrs      = vio_dev_attrs,
+       .uevent         = vio_hotplug,
        .match          = vio_bus_match,
        .probe          = vio_device_probe,
        .remove         = vio_device_remove,
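
The uevent hook and modalias attribute above exist so udev can autoload vio drivers: the kernel emits MODALIAS=vio:T<type>S<compat>, and modprobe resolves that string against aliases generated from each driver's id table. A sketch of the driver side, with deliberately made-up strings (real drivers such as sunvnet and sunvdc define their own):

        /* Hypothetical ids; a matching vio:T...S... alias is generated
         * from this table at module build time. */
        static const struct vio_device_id example_ids[] = {
                { .type = "example-port", .compat = "example,compat" },
                { /* sentinel */ }
        };
        MODULE_DEVICE_TABLE(vio, example_ids);
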
index aadd321aa05db983af75a13ee14f2693ad75ed5c..7d02b1fef0256bb84c218e55b4d2328d037f5903 100644 (file)
@@ -33,6 +33,10 @@ ENTRY(_start)
 jiffies = jiffies_64;
 #endif
 
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
 SECTIONS
 {
 #ifdef CONFIG_SPARC64
index 1e67ce95836972d439b50c88aa77d8395a543652..855019a8590ea5d556b71c9ae9356f7dcb629588 100644 (file)
@@ -32,8 +32,7 @@ fill_fixup:
         rd     %pc, %g7
        call    do_sparc64_fault
         add    %sp, PTREGS_OFF, %o0
-       ba,pt   %xcc, rtrap
-        nop
+       ba,a,pt %xcc, rtrap
 
        /* Be very careful about usage of the trap globals here.
         * You cannot touch %g5 as that has the fault information.
index 1cfe6aab7a11572d54f6848fe4656b7b40af8cf2..09e838801e397b4071b6604852e24a2a799e0a86 100644 (file)
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void)
                        max_phys_bits = 47;
                        break;
                case SUN4V_CHIP_SPARC_M7:
+               case SUN4V_CHIP_SPARC_SN:
                default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
        default:
@@ -2138,6 +2140,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
        default:
index 40625ca7a190953fc87c386a03017bab96903435..6011a573dd64995f0818667c1bb426518d689b78 100644 (file)
@@ -474,6 +474,7 @@ static __init int _init_perf_amd_iommu(
 
 static struct perf_amd_iommu __perf_iommu = {
        .pmu = {
+               .task_ctx_nr    = perf_invalid_context,
                .event_init     = perf_iommu_event_init,
                .add            = perf_iommu_add,
                .del            = perf_iommu_del,
index aff79884e17d2818531b978df08726c882cc9fed..a6fd4dbcf820abf727b6118c0084a6877ec0340d 100644 (file)
@@ -3637,6 +3637,8 @@ __init int intel_pmu_init(void)
                pr_cont("Knights Landing events, ");
                break;
 
+       case 142: /* 14nm Kabylake Mobile */
+       case 158: /* 14nm Kabylake Desktop */
        case 78: /* 14nm Skylake Mobile */
        case 94: /* 14nm Skylake Desktop */
        case 85: /* 14nm Skylake Server */
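
Note: models 142 and 158 (Kabylake mobile and desktop) are simply added to the same case block as Skylake, so they reuse Skylake's event constraints and extra registers unchanged; the Kabylake PMU is compatible with Skylake's.
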
index 8f4942e2bcbb21584a78225b1208adefe86e0aa9..d7ce96a7dacaedc630a0e9a5ef5b761edbd0ffc7 100644 (file)
@@ -891,9 +891,7 @@ void __init uv_system_init(void)
        }
        pr_info("UV: Found %s hub\n", hub);
 
-       /* We now only need to map the MMRs on UV1 */
-       if (is_uv1_hub())
-               map_low_mmrs();
+       map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
index b285d4e8c68e33bf3d4f2b070fdbcbbce7036d9a..5da924bbf0a0f22aa676f5be5a3f97fe4811b087 100644 (file)
@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
                                        continue;
                                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                                        resource_size_t start, end;
+                                       unsigned long flags;
+
+                                       flags = pci_resource_flags(dev, i);
+                                       if (!(flags & IORESOURCE_MEM))
+                                               continue;
+
+                                       if (flags & IORESOURCE_UNSET)
+                                               continue;
+
+                                       if (pci_resource_len(dev, i) == 0)
+                                               continue;
 
                                        start = pci_resource_start(dev, i);
-                                       if (start == 0)
-                                               break;
                                        end = pci_resource_end(dev, i);
                                        if (screen_info.lfb_base >= start &&
                                            screen_info.lfb_base < end) {
                                                found_bar = 1;
+                                               break;
                                        }
                                }
                        }
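
Note: the old scan gave up at the first BAR whose start address was zero, so on systems where such a BAR preceded the one backing the EFI framebuffer the match was never found. The rewritten loop instead skips BARs that are not memory resources, are unset, or have zero length, and breaks out as soon as the BAR covering screen_info.lfb_base is located.
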
index 92ae6acac8a7fbcb9b91cb386b3c23015c5377ea..6aa0f4d9eea6816bbf0c78514e0b76d8f4dc7def 100644 (file)
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
 
        if (freq_desc_tables[cpu_index].msr_plat) {
                rdmsr(MSR_PLATFORM_INFO, lo, hi);
-               ratio = (lo >> 8) & 0x1f;
+               ratio = (lo >> 8) & 0xff;
        } else {
                rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
                ratio = (hi >> 8) & 0x1f;
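
The one-character mask change above matters because the maximum non-turbo ratio in MSR_PLATFORM_INFO occupies bits 15:8, a full byte; masking with 0x1f silently truncated any ratio of 32 or more. A worked example with assumed values:

        /* ratio field = 40, bus clock = 100 MHz:
         *   (lo >> 8) & 0x1f  ->  40 & 0x1f == 8  ->  TSC read as  800 MHz
         *   (lo >> 8) & 0xff  ->  40              ->  TSC read as 4000 MHz
         */
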
index 1ff4dbb73fb7a7ee56c2c28e0d37a6a049f3adc9..b6f50e8b0a393675009a5dcaad7f30af315bc91d 100644 (file)
@@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         */
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
-           PageTransCompound(pfn_to_page(pfn)) &&
+           PageTransCompoundMap(pfn_to_page(pfn)) &&
            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
                /*
@@ -4785,7 +4785,7 @@ restart:
                 */
                if (sp->role.direct &&
                        !kvm_is_reserved_pfn(pfn) &&
-                       PageTransCompound(pfn_to_page(pfn))) {
+                       PageTransCompoundMap(pfn_to_page(pfn))) {
                        drop_spte(kvm, sptep);
                        need_tlb_flush = 1;
                        goto restart;
index a2433817c987833f525380d8425bb6e902873bbd..6a2f5691b1ab57261809617ca9a9b375adfe135e 100644 (file)
@@ -43,40 +43,40 @@ void __init efi_bgrt_init(void)
                return;
 
        if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
-               pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
+               pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       bgrt_tab->header.length, sizeof(*bgrt_tab));
                return;
        }
        if (bgrt_tab->version != 1) {
-               pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
+               pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
                       bgrt_tab->version);
                return;
        }
        if (bgrt_tab->status & 0xfe) {
-               pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
+               pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
                       bgrt_tab->status);
                return;
        }
        if (bgrt_tab->image_type != 0) {
-               pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
+               pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt_tab->image_type);
                return;
        }
        if (!bgrt_tab->image_address) {
-               pr_err("Ignoring BGRT: null image address\n");
+               pr_notice("Ignoring BGRT: null image address\n");
                return;
        }
 
        image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image header memory\n");
+               pr_notice("Ignoring BGRT: failed to map image header memory\n");
                return;
        }
 
        memcpy(&bmp_header, image, sizeof(bmp_header));
        memunmap(image);
        if (bmp_header.id != 0x4d42) {
-               pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+               pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
                        bmp_header.id);
                return;
        }
@@ -84,14 +84,14 @@ void __init efi_bgrt_init(void)
 
        bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
        if (!bgrt_image) {
-               pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
+               pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
                       bgrt_image_size);
                return;
        }
 
        image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image memory\n");
+               pr_notice("Ignoring BGRT: failed to map image memory\n");
                kfree(bgrt_image);
                bgrt_image = NULL;
                return;
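
Note: every message in this function is demoted from pr_err() to pr_notice(), presumably because a missing or malformed BGRT only means the firmware's boot splash cannot be reused; plenty of firmware gets this table wrong, and that is not a kernel error.
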
index 1982310e6d83a3aa6a6e510e8f72682fb1c05226..da198b8641074cff2830687be53ffa79ba65df0c 100644 (file)
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                                obj_desc->method.mutex->mutex.
                                    original_sync_level =
                                    obj_desc->method.mutex->mutex.sync_level;
+
+                               obj_desc->method.mutex->mutex.thread_id =
+                                   acpi_os_get_thread_id();
                        }
                }
 
index d0f35e63640ba78b956dd6ed3b870a542fedaac5..63cc9dbe4f3b4d1db7037fb4ca1df532b8dc5534 100644 (file)
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                        offset);
                        rc = -ENXIO;
                }
-       } else
+       } else {
                rc = 0;
+               if (cmd_rc)
+                       *cmd_rc = xlat_status(buf, cmd);
+       }
 
  out:
        ACPI_FREE(out_obj);
index 5083f85efea76edb7f2968ffa967becbf4d4943a..cfa936a32513b1d70d74834fa743b6f612665dc1 100644 (file)
@@ -202,6 +202,14 @@ config SATA_FSL
 
          If unsure, say N.
 
+config SATA_AHCI_SEATTLE
+       tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+       depends on ARCH_SEATTLE
+       help
+        This option enables support for the AMD Seattle SATA host controller.
+
+        If unsure, say N.
+
 config SATA_INIC162X
        tristate "Initio 162x SATA support (Very Experimental)"
        depends on PCI
index 18579521464e48e22001838ab4d0abf3efbc4b0a..0b2afb7e5f359979f7a54cdadb3178b174c885b0 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA)               += libata.o
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)                += ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)  += acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)        += ahci_seattle.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)         += sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)    += sata_inic162x.o
index 40442332bfa7c154b93b540eae3eb8ae94e6d9f3..62a04c8fb5c99c9ab0614bb6dc8f0146c47329e6 100644 (file)
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
+       of_property_read_u32(dev->of_node,
+                            "ports-implemented", &hpriv->force_port_map);
+
        if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
                hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644 (file)
index 0000000..6e702ab
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit         Type            Description
+ * 31          RW              OD7.2 (activity)
+ * 30          RW              OD7.1 (locate)
+ * 29          RW              OD7.0 (fault)
+ * 28...8      RW              OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
+ * 7           RO              SGPIO feature flag
+ * 6:4         RO              Reserved
+ * 3:0         RO              Number of ports (0 means no ports supported)
+ */
+#define ACTIVITY_BIT_POS(x)            (8 + (3 * x))
+#define LOCATE_BIT_POS(x)              (ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)               (LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK                  0x00010000
+#define LOCATE_MASK                    0x00080000
+#define FAULT_MASK                     0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size);
+
+struct seattle_plat_data {
+       void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+       .inherits               = &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+       .inherits               = &ahci_ops,
+       .transmit_led_message   = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+       .flags          = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+       .link_flags     = ATA_LFLAG_SW_ACTIVITY,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size)
+{
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct ahci_port_priv *pp = ap->private_data;
+       struct seattle_plat_data *plat_data = hpriv->plat_data;
+       unsigned long flags;
+       int pmp;
+       struct ahci_em_priv *emp;
+       u32 val;
+
+       /* get the slot number from the message */
+       pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+       if (pmp >= EM_MAX_SLOTS)
+               return -EINVAL;
+       emp = &pp->em_priv[pmp];
+
+       val = ioread32(plat_data->sgpio_ctrl);
+       if (state & ACTIVITY_MASK)
+               val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+       if (state & LOCATE_MASK)
+               val |= 1 << LOCATE_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+       if (state & FAULT_MASK)
+               val |= 1 << FAULT_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+       iowrite32(val, plat_data->sgpio_ctrl);
+
+       spin_lock_irqsave(ap->lock, flags);
+
+       /* save off new led state for port/slot */
+       emp->led_state = state;
+
+       spin_unlock_irqrestore(ap->lock, flags);
+
+       return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+               struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+       struct device *dev = &pdev->dev;
+       struct seattle_plat_data *plat_data;
+       u32 val;
+
+       plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+       if (!plat_data)         /* devm_kzalloc() returns NULL on failure */
+               return &ahci_port_info;
+
+       plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+                             platform_get_resource(pdev, IORESOURCE_MEM, 1));
+       if (IS_ERR(plat_data->sgpio_ctrl))
+               return &ahci_port_info;
+
+       val = ioread32(plat_data->sgpio_ctrl);
+
+       if (!(val & 0xf))
+               return &ahci_port_info;
+
+       hpriv->em_loc = 0;
+       hpriv->em_buf_sz = 4;
+       hpriv->em_msg_type = EM_MSG_TYPE_LED;
+       hpriv->plat_data = plat_data;
+
+       dev_info(dev, "SGPIO LED control is enabled.\n");
+       return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct ahci_host_priv *hpriv;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_platform_init_host(pdev, hpriv,
+                                    ahci_seattle_get_port_info(pdev, hpriv),
+                                    &ahci_platform_sht);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+                        ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+       { "AMDI0600", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+       .probe = ahci_seattle_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = DRV_NAME,
+               .acpi_match_table = ahci_acpi_match,
+               .pm = &ahci_pm_ops,
+       },
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
index 3982054060b81bb0d253b001883b32e346c6dc74..a5d7c1c2a05ee26dc80a41d440357c4a7be96a20 100644 (file)
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
                dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
                         port_map, hpriv->force_port_map);
                port_map = hpriv->force_port_map;
+               hpriv->saved_port_map = port_map;
        }
 
        if (hpriv->mask_port_map) {
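
Note: this pairs with the ahci_platform.c hunk above that reads the "ports-implemented" DT property into force_port_map, for controllers whose PI register reads back as zero. Recording the forced value in saved_port_map matters because that is the value written back to the controller whenever the initial configuration is restored after a reset.
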
index 19837ef04d8ef21a355e22171117398b2f966f80..e70ceb406fe9171f8b3dd1cc2fced28fcbe3e778 100644 (file)
@@ -1,3 +1,4 @@
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 obj-y                          += core.o cpu.o
+obj-$(CONFIG_OF)               += of.o
 obj-$(CONFIG_DEBUG_FS)         += debugfs.o
index 433b60092972d56abba55897158d6c22156cf631..7c04c87738a69d13b28060fe5dfa9b095798709e 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-#include <linux/of.h>
 #include <linux/export.h>
 #include <linux/regulator/consumer.h>
 
@@ -29,7 +28,7 @@
  * from here, with each opp_table containing the list of opps it supports in
  * various states of availability.
  */
-static LIST_HEAD(opp_tables);
+LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 
@@ -53,26 +52,6 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
        return NULL;
 }
 
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
-       struct opp_table *opp_table;
-
-       list_for_each_entry_rcu(opp_table, &opp_tables, node) {
-               if (opp_table->np == np) {
-                       /*
-                        * Multiple devices can point to the same OPP table and
-                        * so will have same node-pointer, np.
-                        *
-                        * But the OPPs will be considered as shared only if the
-                        * OPP table contains a "opp-shared" property.
-                        */
-                       return opp_table->shared_opp ? opp_table : NULL;
-               }
-       }
-
-       return NULL;
-}
-
 /**
  * _find_opp_table() - find opp_table struct using device pointer
  * @dev:       device pointer used to lookup OPP table
@@ -259,9 +238,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
        reg = opp_table->regulator;
        if (IS_ERR(reg)) {
                /* Regulator may not be required for device */
-               if (reg)
-                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-                               PTR_ERR(reg));
                rcu_read_unlock();
                return 0;
        }
@@ -760,7 +736,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
 {
        struct opp_table *opp_table;
        struct opp_device *opp_dev;
-       struct device_node *np;
        int ret;
 
        /* Check for existing table for 'dev' first */
@@ -784,20 +759,7 @@ static struct opp_table *_add_opp_table(struct device *dev)
                return NULL;
        }
 
-       /*
-        * Only required for backward compatibility with v1 bindings, but isn't
-        * harmful for other cases. And so we do it unconditionally.
-        */
-       np = of_node_get(dev->of_node);
-       if (np) {
-               u32 val;
-
-               if (!of_property_read_u32(np, "clock-latency", &val))
-                       opp_table->clock_latency_ns_max = val;
-               of_property_read_u32(np, "voltage-tolerance",
-                                    &opp_table->voltage_tolerance_v1);
-               of_node_put(np);
-       }
+       _of_init_opp_table(opp_table, dev);
 
        /* Set regulator to a non-NULL error value */
        opp_table->regulator = ERR_PTR(-ENXIO);
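
The inline v1-binding parsing deleted above moves behind the new _of_init_opp_table() call (defined in the of.c file added by the Makefile hunk earlier). A minimal sketch of what the relocated helper must do, reconstructed from the deleted lines (it relies on the driver-internal opp.h for struct opp_table):

        void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
        {
                struct device_node *np;
                u32 val;

                /* Only required for backward compatibility with v1
                 * bindings, but harmless otherwise, so done
                 * unconditionally. */
                np = of_node_get(dev->of_node);
                if (!np)
                        return;

                if (!of_property_read_u32(np, "clock-latency", &val))
                        opp_table->clock_latency_ns_max = val;
                of_property_read_u32(np, "voltage-tolerance",
                                     &opp_table->voltage_tolerance_v1);
                of_node_put(np);
        }
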
@@ -893,8 +855,8 @@ static void _kfree_opp_rcu(struct rcu_head *head)
  * It is assumed that the caller holds required mutex for an RCU updater
  * strategy.
  */
-static void _opp_remove(struct opp_table *opp_table,
-                       struct dev_pm_opp *opp, bool notify)
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
+                bool notify)
 {
        /*
         * Notify the changes in the availability of the operable
@@ -955,8 +917,8 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
-static struct dev_pm_opp *_allocate_opp(struct device *dev,
-                                       struct opp_table **opp_table)
+struct dev_pm_opp *_allocate_opp(struct device *dev,
+                                struct opp_table **opp_table)
 {
        struct dev_pm_opp *opp;
 
@@ -992,8 +954,8 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
        return true;
 }
 
-static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-                   struct opp_table *opp_table)
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+            struct opp_table *opp_table)
 {
        struct dev_pm_opp *opp;
        struct list_head *head = &opp_table->opp_list;
@@ -1069,8 +1031,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  *             Duplicate OPPs (both freq and volt are same) and !opp->available
  * -ENOMEM     Memory allocation failure
  */
-static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
-                      bool dynamic)
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+               bool dynamic)
 {
        struct opp_table *opp_table;
        struct dev_pm_opp *new_opp;
@@ -1115,83 +1077,6 @@ unlock:
        return ret;
 }
 
-/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
-                             struct opp_table *opp_table)
-{
-       u32 microvolt[3] = {0};
-       u32 val;
-       int count, ret;
-       struct property *prop = NULL;
-       char name[NAME_MAX];
-
-       /* Search for "opp-microvolt-<name>" */
-       if (opp_table->prop_name) {
-               snprintf(name, sizeof(name), "opp-microvolt-%s",
-                        opp_table->prop_name);
-               prop = of_find_property(opp->np, name, NULL);
-       }
-
-       if (!prop) {
-               /* Search for "opp-microvolt" */
-               sprintf(name, "opp-microvolt");
-               prop = of_find_property(opp->np, name, NULL);
-
-               /* Missing property isn't a problem, but an invalid entry is */
-               if (!prop)
-                       return 0;
-       }
-
-       count = of_property_count_u32_elems(opp->np, name);
-       if (count < 0) {
-               dev_err(dev, "%s: Invalid %s property (%d)\n",
-                       __func__, name, count);
-               return count;
-       }
-
-       /* There can be one or three elements here */
-       if (count != 1 && count != 3) {
-               dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
-                       __func__, name, count);
-               return -EINVAL;
-       }
-
-       ret = of_property_read_u32_array(opp->np, name, microvolt, count);
-       if (ret) {
-               dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
-               return -EINVAL;
-       }
-
-       opp->u_volt = microvolt[0];
-
-       if (count == 1) {
-               opp->u_volt_min = opp->u_volt;
-               opp->u_volt_max = opp->u_volt;
-       } else {
-               opp->u_volt_min = microvolt[1];
-               opp->u_volt_max = microvolt[2];
-       }
-
-       /* Search for "opp-microamp-<name>" */
-       prop = NULL;
-       if (opp_table->prop_name) {
-               snprintf(name, sizeof(name), "opp-microamp-%s",
-                        opp_table->prop_name);
-               prop = of_find_property(opp->np, name, NULL);
-       }
-
-       if (!prop) {
-               /* Search for "opp-microamp" */
-               sprintf(name, "opp-microamp");
-               prop = of_find_property(opp->np, name, NULL);
-       }
-
-       if (prop && !of_property_read_u32(opp->np, name, &val))
-               opp->u_amp = val;
-
-       return 0;
-}
-
 /**
  * dev_pm_opp_set_supported_hw() - Set supported platforms
  * @dev: Device for which supported-hw has to be set.
@@ -1520,144 +1405,6 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
 
-static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
-                             struct device_node *np)
-{
-       unsigned int count = opp_table->supported_hw_count;
-       u32 version;
-       int ret;
-
-       if (!opp_table->supported_hw)
-               return true;
-
-       while (count--) {
-               ret = of_property_read_u32_index(np, "opp-supported-hw", count,
-                                                &version);
-               if (ret) {
-                       dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
-                                __func__, count, ret);
-                       return false;
-               }
-
-               /* Both of these are bitwise masks of the versions */
-               if (!(version & opp_table->supported_hw[count]))
-                       return false;
-       }
-
-       return true;
-}
-
-/**
- * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
- * @dev:       device for which we do this operation
- * @np:                device node
- *
- * This function adds an opp definition to the opp table and returns status. The
- * opp can be controlled using dev_pm_opp_enable/disable functions and may be
- * removed by dev_pm_opp_remove.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0           On success OR
- *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST     Freq are same and volt are different OR
- *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM     Memory allocation failure
- * -EINVAL     Failed parsing the OPP node
- */
-static int _opp_add_static_v2(struct device *dev, struct device_node *np)
-{
-       struct opp_table *opp_table;
-       struct dev_pm_opp *new_opp;
-       u64 rate;
-       u32 val;
-       int ret;
-
-       /* Hold our table modification lock here */
-       mutex_lock(&opp_table_lock);
-
-       new_opp = _allocate_opp(dev, &opp_table);
-       if (!new_opp) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
-
-       ret = of_property_read_u64(np, "opp-hz", &rate);
-       if (ret < 0) {
-               dev_err(dev, "%s: opp-hz not found\n", __func__);
-               goto free_opp;
-       }
-
-       /* Check if the OPP supports hardware's hierarchy of versions or not */
-       if (!_opp_is_supported(dev, opp_table, np)) {
-               dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
-               goto free_opp;
-       }
-
-       /*
-        * Rate is defined as an unsigned long in clk API, and so casting
-        * explicitly to its type. Must be fixed once rate is 64 bit
-        * guaranteed in clk API.
-        */
-       new_opp->rate = (unsigned long)rate;
-       new_opp->turbo = of_property_read_bool(np, "turbo-mode");
-
-       new_opp->np = np;
-       new_opp->dynamic = false;
-       new_opp->available = true;
-
-       if (!of_property_read_u32(np, "clock-latency-ns", &val))
-               new_opp->clock_latency_ns = val;
-
-       ret = opp_parse_supplies(new_opp, dev, opp_table);
-       if (ret)
-               goto free_opp;
-
-       ret = _opp_add(dev, new_opp, opp_table);
-       if (ret)
-               goto free_opp;
-
-       /* OPP to select on device suspend */
-       if (of_property_read_bool(np, "opp-suspend")) {
-               if (opp_table->suspend_opp) {
-                       dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
-                                __func__, opp_table->suspend_opp->rate,
-                                new_opp->rate);
-               } else {
-                       new_opp->suspend = true;
-                       opp_table->suspend_opp = new_opp;
-               }
-       }
-
-       if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
-               opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
-
-       mutex_unlock(&opp_table_lock);
-
-       pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
-                __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
-                new_opp->u_volt_min, new_opp->u_volt_max,
-                new_opp->clock_latency_ns);
-
-       /*
-        * Notify the changes in the availability of the operable
-        * frequency/voltage list.
-        */
-       srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
-       return 0;
-
-free_opp:
-       _opp_remove(opp_table, new_opp, false);
-unlock:
-       mutex_unlock(&opp_table_lock);
-       return ret;
-}
-
 /**
  * dev_pm_opp_add() - Add an OPP table from a table definition
  * @dev:       device for which we do this operation
@@ -1845,21 +1592,11 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
-#ifdef CONFIG_OF
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- *                               entries
- * @dev:       device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
+/*
+ * Free OPPs either created using static entries present in DT or even the
+ * dynamically added entries based on remove_all param.
  */
-void dev_pm_opp_of_remove_table(struct device *dev)
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
 {
        struct opp_table *opp_table;
        struct dev_pm_opp *opp, *tmp;
@@ -1884,7 +1621,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
        if (list_is_singular(&opp_table->dev_list)) {
                /* Free static OPPs */
                list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-                       if (!opp->dynamic)
+                       if (remove_all || !opp->dynamic)
                                _opp_remove(opp_table, opp, true);
                }
        } else {
@@ -1894,160 +1631,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
 unlock:
        mutex_unlock(&opp_table_lock);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
-{
-       /*
-        * TODO: Support for multiple OPP tables.
-        *
-        * There should be only ONE phandle present in "operating-points-v2"
-        * property.
-        */
-
-       return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
-}
-
-/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
-{
-       struct device_node *np;
-       struct opp_table *opp_table;
-       int ret = 0, count = 0;
-
-       mutex_lock(&opp_table_lock);
-
-       opp_table = _managed_opp(opp_np);
-       if (opp_table) {
-               /* OPPs are already managed */
-               if (!_add_opp_dev(dev, opp_table))
-                       ret = -ENOMEM;
-               mutex_unlock(&opp_table_lock);
-               return ret;
-       }
-       mutex_unlock(&opp_table_lock);
-
-       /* We have opp-table node now, iterate over it and add OPPs */
-       for_each_available_child_of_node(opp_np, np) {
-               count++;
-
-               ret = _opp_add_static_v2(dev, np);
-               if (ret) {
-                       dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
-                               ret);
-                       goto free_table;
-               }
-       }
-
-       /* There should be one or more OPPs defined */
-       if (WARN_ON(!count))
-               return -ENOENT;
-
-       mutex_lock(&opp_table_lock);
-
-       opp_table = _find_opp_table(dev);
-       if (WARN_ON(IS_ERR(opp_table))) {
-               ret = PTR_ERR(opp_table);
-               mutex_unlock(&opp_table_lock);
-               goto free_table;
-       }
-
-       opp_table->np = opp_np;
-       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
-
-       mutex_unlock(&opp_table_lock);
-
-       return 0;
-
-free_table:
-       dev_pm_opp_of_remove_table(dev);
-
-       return ret;
-}
-
-/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
-{
-       const struct property *prop;
-       const __be32 *val;
-       int nr;
-
-       prop = of_find_property(dev->of_node, "operating-points", NULL);
-       if (!prop)
-               return -ENODEV;
-       if (!prop->value)
-               return -ENODATA;
-
-       /*
-        * Each OPP is a set of tuples consisting of frequency and
-        * voltage like <freq-kHz vol-uV>.
-        */
-       nr = prop->length / sizeof(u32);
-       if (nr % 2) {
-               dev_err(dev, "%s: Invalid OPP table\n", __func__);
-               return -EINVAL;
-       }
-
-       val = prop->value;
-       while (nr) {
-               unsigned long freq = be32_to_cpup(val++) * 1000;
-               unsigned long volt = be32_to_cpup(val++);
-
-               if (_opp_add_v1(dev, freq, volt, false))
-                       dev_warn(dev, "%s: Failed to add OPP %ld\n",
-                                __func__, freq);
-               nr -= 2;
-       }
-
-       return 0;
-}
 
 /**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * dev_pm_opp_remove_table() - Free all OPPs associated with the device
  * @dev:       device pointer used to lookup OPP table.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free both OPPs created using static entries present in DT and the
+ * dynamically added entries.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
- *
- * Return:
- * 0           On success OR
- *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST     Freq are same and volt are different OR
- *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM     Memory allocation failure
- * -ENODEV     when 'operating-points' property is not found or is invalid data
- *             in device node.
- * -ENODATA    when empty 'operating-points' property is found
- * -EINVAL     when invalid entries are found in opp-v2 table
  */
-int dev_pm_opp_of_add_table(struct device *dev)
+void dev_pm_opp_remove_table(struct device *dev)
 {
-       struct device_node *opp_np;
-       int ret;
-
-       /*
-        * OPPs have two versions of bindings now. The older one is deprecated,
-        * try for the new binding first.
-        */
-       opp_np = _of_get_opp_desc_node(dev);
-       if (!opp_np) {
-               /*
-                * Try old-deprecated bindings for backward compatibility with
-                * older dtbs.
-                */
-               return _of_add_opp_table_v1(dev);
-       }
-
-       ret = _of_add_opp_table_v2(dev, opp_np);
-       of_node_put(opp_np);
-
-       return ret;
+       _dev_pm_opp_remove_table(dev, true);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
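To make the new static/dynamic split concrete, a minimal usage sketch (hypothetical foo_* driver; the 1 GHz/975000 uV OPP is illustrative, not from this patch):

#include <linux/pm_opp.h>

/* Hypothetical driver: registers one dynamic OPP at probe time */
static int foo_probe(struct device *dev)
{
	return dev_pm_opp_add(dev, 1000000000, 975000); /* 1 GHz at 975000 uV */
}

static void foo_remove(struct device *dev)
{
	/*
	 * dev_pm_opp_of_remove_table() would free only the static DT
	 * entries; dev_pm_opp_remove_table() frees the dynamic ones too.
	 */
	dev_pm_opp_remove_table(dev);
}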
index ba2bdbd932ef3c1ebaff47c6203bddb27fe9c03b..83d6e7ba1a343db48a3de678a3ba5b7df973c221 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/of.h>
 #include <linux/slab.h>
 
 #include "opp.h"
@@ -119,8 +118,66 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif /* CONFIG_CPU_FREQ */
 
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+{
+       struct device *cpu_dev;
+       int cpu;
+
+       WARN_ON(cpumask_empty(cpumask));
+
+       for_each_cpu(cpu, cpumask) {
+               cpu_dev = get_cpu_device(cpu);
+               if (!cpu_dev) {
+                       pr_err("%s: failed to get cpu%d device\n", __func__,
+                              cpu);
+                       continue;
+               }
+
+               if (of)
+                       dev_pm_opp_of_remove_table(cpu_dev);
+               else
+                       dev_pm_opp_remove_table(cpu_dev);
+       }
+}
+
+/**
+ * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask:   cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used to remove all the OPP entries associated with
+ * the CPUs in @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+       _dev_pm_opp_cpumask_remove_table(cpumask, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by multiple CPUs
+ * @cpu_dev:   CPU device for which we do this operation
+ * @cpumask:   cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks OPP table of the @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if the OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
+                               const struct cpumask *cpumask)
 {
        struct opp_device *opp_dev;
        struct opp_table *opp_table;
@@ -131,7 +188,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
 
        opp_table = _find_opp_table(cpu_dev);
        if (IS_ERR(opp_table)) {
-               ret = -EINVAL;
+               ret = PTR_ERR(opp_table);
                goto unlock;
        }
 
@@ -152,6 +209,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
                                __func__, cpu);
                        continue;
                }
+
+               /* Mark the OPP table as shared by multiple CPUs now */
+               opp_table->shared_opp = true;
        }
 unlock:
        mutex_unlock(&opp_table_lock);
@@ -160,112 +220,47 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
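A sketch of the intended call site for the reworked helper, e.g. from a cpufreq driver's ->init() callback (hypothetical foo_* names, assuming an OPP table was already created for policy->cpu):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	/* Mark every CPU of this policy as sharing cpu_dev's OPP table */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		dev_warn(cpu_dev, "failed to mark OPPs as shared: %d\n", ret);

	return ret;
}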
 
-#ifdef CONFIG_OF
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
-{
-       struct device *cpu_dev;
-       int cpu;
-
-       WARN_ON(cpumask_empty(cpumask));
-
-       for_each_cpu(cpu, cpumask) {
-               cpu_dev = get_cpu_device(cpu);
-               if (!cpu_dev) {
-                       pr_err("%s: failed to get cpu%d device\n", __func__,
-                              cpu);
-                       continue;
-               }
-
-               dev_pm_opp_of_remove_table(cpu_dev);
-       }
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
-       struct device *cpu_dev;
-       int cpu, ret = 0;
-
-       WARN_ON(cpumask_empty(cpumask));
-
-       for_each_cpu(cpu, cpumask) {
-               cpu_dev = get_cpu_device(cpu);
-               if (!cpu_dev) {
-                       pr_err("%s: failed to get cpu%d device\n", __func__,
-                              cpu);
-                       continue;
-               }
-
-               ret = dev_pm_opp_of_add_table(cpu_dev);
-               if (ret) {
-                       pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
-                              __func__, cpu, ret);
-
-                       /* Free all other OPPs */
-                       dev_pm_opp_of_cpumask_remove_table(cpumask);
-                       break;
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev:   CPU device for which we do this operation
+ * @cpumask:   cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ * Returns -ENODEV if the OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
  */
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-       struct device_node *np, *tmp_np;
-       struct device *tcpu_dev;
-       int cpu, ret = 0;
-
-       /* Get OPP descriptor node */
-       np = _of_get_opp_desc_node(cpu_dev);
-       if (!np) {
-               dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
-               return -ENOENT;
-       }
-
-       cpumask_set_cpu(cpu_dev->id, cpumask);
-
-       /* OPPs are shared ? */
-       if (!of_property_read_bool(np, "opp-shared"))
-               goto put_cpu_node;
-
-       for_each_possible_cpu(cpu) {
-               if (cpu == cpu_dev->id)
-                       continue;
+       struct opp_device *opp_dev;
+       struct opp_table *opp_table;
+       int ret = 0;
 
-               tcpu_dev = get_cpu_device(cpu);
-               if (!tcpu_dev) {
-                       dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
-                               __func__, cpu);
-                       ret = -ENODEV;
-                       goto put_cpu_node;
-               }
+       mutex_lock(&opp_table_lock);
 
-               /* Get OPP descriptor node */
-               tmp_np = _of_get_opp_desc_node(tcpu_dev);
-               if (!tmp_np) {
-                       dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
-                               __func__);
-                       ret = -ENOENT;
-                       goto put_cpu_node;
-               }
+       opp_table = _find_opp_table(cpu_dev);
+       if (IS_ERR(opp_table)) {
+               ret = PTR_ERR(opp_table);
+               goto unlock;
+       }
 
-               /* CPUs are sharing opp node */
-               if (np == tmp_np)
-                       cpumask_set_cpu(cpu, cpumask);
+       cpumask_clear(cpumask);
 
-               of_node_put(tmp_np);
+       if (opp_table->shared_opp) {
+               list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+                       cpumask_set_cpu(opp_dev->dev->id, cpumask);
+       } else {
+               cpumask_set_cpu(cpu_dev->id, cpumask);
        }
 
-put_cpu_node:
-       of_node_put(np);
+unlock:
+       mutex_unlock(&opp_table_lock);
+
        return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
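Continuing the same hypothetical sketch, the counterpart query: once the table is marked shared, the sharing mask can be recovered without touching DT:

static int foo_get_siblings(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	/* Fills policy->cpus from the opp_table's registered device list */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}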
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644 (file)
index 0000000..94d2010
--- /dev/null
@@ -0,0 +1,591 @@
+/*
+ * Generic OPP OF helpers
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *     Nishanth Menon
+ *     Romit Dasgupta
+ *     Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/export.h>
+
+#include "opp.h"
+
+static struct opp_table *_managed_opp(const struct device_node *np)
+{
+       struct opp_table *opp_table;
+
+       list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+               if (opp_table->np == np) {
+                       /*
+                        * Multiple devices can point to the same OPP table and
+                        * so will have the same node pointer, np.
+                        *
+                        * But the OPPs will be considered shared only if the
+                        * OPP table contains an "opp-shared" property.
+                        */
+                       return opp_table->shared_opp ? opp_table : NULL;
+               }
+       }
+
+       return NULL;
+}
+
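The "managed" case is easiest to see with a shared table; an illustrative v2 layout (DT fragment shown in comment form, not part of this patch):

/*
 * Illustrative v2 layout (DT fragment, not from this patch):
 *
 *	cpu0: cpu@0 { operating-points-v2 = <&cpu_opp_table>; };
 *	cpu1: cpu@1 { operating-points-v2 = <&cpu_opp_table>; };
 *
 *	cpu_opp_table: opp_table0 {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *	};
 *
 * Both CPUs resolve to the same node pointer, so when cpu1 is probed,
 * _managed_opp() returns the table already built for cpu0 instead of
 * parsing the OPP nodes a second time.
 */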
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+{
+       struct device_node *np;
+
+       /*
+        * Only required for backward compatibility with v1 bindings, but it
+        * isn't harmful in other cases, so we do it unconditionally.
+        */
+       np = of_node_get(dev->of_node);
+       if (np) {
+               u32 val;
+
+               if (!of_property_read_u32(np, "clock-latency", &val))
+                       opp_table->clock_latency_ns_max = val;
+               of_property_read_u32(np, "voltage-tolerance",
+                                    &opp_table->voltage_tolerance_v1);
+               of_node_put(np);
+       }
+}
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+                             struct device_node *np)
+{
+       unsigned int count = opp_table->supported_hw_count;
+       u32 version;
+       int ret;
+
+       if (!opp_table->supported_hw)
+               return true;
+
+       while (count--) {
+               ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+                                                &version);
+               if (ret) {
+                       dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+                                __func__, count, ret);
+                       return false;
+               }
+
+               /* Both of these are bitwise masks of the versions */
+               if (!(version & opp_table->supported_hw[count]))
+                       return false;
+       }
+
+       return true;
+}
+
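The producer side of this check can be sketched as follows (assuming the int-returning dev_pm_opp_set_supported_hw() of this series; the version masks are made up for illustration):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int foo_declare_hw(struct device *dev)
{
	/* Illustrative masks: cut revisions at index 0, speed bins at index 1 */
	const u32 versions[] = { BIT(2), BIT(0) | BIT(1) };

	return dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
}

An OPP whose "opp-supported-hw" entry shares no bits with the mask at the same index is then skipped by _opp_is_supported().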
+/* TODO: Support multiple regulators */
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+                             struct opp_table *opp_table)
+{
+       u32 microvolt[3] = {0};
+       u32 val;
+       int count, ret;
+       struct property *prop = NULL;
+       char name[NAME_MAX];
+
+       /* Search for "opp-microvolt-<name>" */
+       if (opp_table->prop_name) {
+               snprintf(name, sizeof(name), "opp-microvolt-%s",
+                        opp_table->prop_name);
+               prop = of_find_property(opp->np, name, NULL);
+       }
+
+       if (!prop) {
+               /* Search for "opp-microvolt" */
+               sprintf(name, "opp-microvolt");
+               prop = of_find_property(opp->np, name, NULL);
+
+               /* Missing property isn't a problem, but an invalid entry is */
+               if (!prop)
+                       return 0;
+       }
+
+       count = of_property_count_u32_elems(opp->np, name);
+       if (count < 0) {
+               dev_err(dev, "%s: Invalid %s property (%d)\n",
+                       __func__, name, count);
+               return count;
+       }
+
+       /* There can be one or three elements here */
+       if (count != 1 && count != 3) {
+               dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+                       __func__, name, count);
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+       if (ret) {
+               dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
+               return -EINVAL;
+       }
+
+       opp->u_volt = microvolt[0];
+
+       if (count == 1) {
+               opp->u_volt_min = opp->u_volt;
+               opp->u_volt_max = opp->u_volt;
+       } else {
+               opp->u_volt_min = microvolt[1];
+               opp->u_volt_max = microvolt[2];
+       }
+
+       /* Search for "opp-microamp-<name>" */
+       prop = NULL;
+       if (opp_table->prop_name) {
+               snprintf(name, sizeof(name), "opp-microamp-%s",
+                        opp_table->prop_name);
+               prop = of_find_property(opp->np, name, NULL);
+       }
+
+       if (!prop) {
+               /* Search for "opp-microamp" */
+               sprintf(name, "opp-microamp");
+               prop = of_find_property(opp->np, name, NULL);
+       }
+
+       if (prop && !of_property_read_u32(opp->np, name, &val))
+               opp->u_amp = val;
+
+       return 0;
+}
+
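A worked example of the lookup order and element handling above (property values illustrative):

/*
 * With opp_table->prop_name set to "slow" via
 * dev_pm_opp_set_prop_name(dev, "slow"), the lookup order is:
 *
 *	opp-microvolt-slow = <975000 970000 985000>;
 *		-> u_volt = 975000, u_volt_min = 970000, u_volt_max = 985000
 *	opp-microvolt = <975000>;
 *		-> u_volt = u_volt_min = u_volt_max = 975000
 *
 * Any other element count is rejected with -EINVAL; a missing property
 * simply leaves the voltages at zero.
 */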
+/**
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ *                               entries
+ * @dev:       device pointer used to lookup OPP table.
+ *
+ * Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+       _dev_pm_opp_remove_table(dev, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+       /*
+        * TODO: Support for multiple OPP tables.
+        *
+        * There should be only ONE phandle present in "operating-points-v2"
+        * property.
+        */
+
+       return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev:       device for which we do this operation
+ * @np:                device node
+ *
+ * This function adds an opp definition to the opp table and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq is the same but volt is different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ * -EINVAL     Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *new_opp;
+       u64 rate;
+       u32 val;
+       int ret;
+
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
+
+       new_opp = _allocate_opp(dev, &opp_table);
+       if (!new_opp) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       ret = of_property_read_u64(np, "opp-hz", &rate);
+       if (ret < 0) {
+               dev_err(dev, "%s: opp-hz not found\n", __func__);
+               goto free_opp;
+       }
+
+       /* Check if the OPP supports hardware's hierarchy of versions or not */
+       if (!_opp_is_supported(dev, opp_table, np)) {
+               dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+               goto free_opp;
+       }
+
+       /*
+        * Rate is defined as an unsigned long in the clk API, so cast
+        * explicitly to that type. This must be fixed once rate is
+        * guaranteed to be 64-bit in the clk API.
+        */
+       new_opp->rate = (unsigned long)rate;
+       new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+       new_opp->np = np;
+       new_opp->dynamic = false;
+       new_opp->available = true;
+
+       if (!of_property_read_u32(np, "clock-latency-ns", &val))
+               new_opp->clock_latency_ns = val;
+
+       ret = opp_parse_supplies(new_opp, dev, opp_table);
+       if (ret)
+               goto free_opp;
+
+       ret = _opp_add(dev, new_opp, opp_table);
+       if (ret)
+               goto free_opp;
+
+       /* OPP to select on device suspend */
+       if (of_property_read_bool(np, "opp-suspend")) {
+               if (opp_table->suspend_opp) {
+                       dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+                                __func__, opp_table->suspend_opp->rate,
+                                new_opp->rate);
+               } else {
+                       new_opp->suspend = true;
+                       opp_table->suspend_opp = new_opp;
+               }
+       }
+
+       if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+               opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+       mutex_unlock(&opp_table_lock);
+
+       pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+                __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+                new_opp->u_volt_min, new_opp->u_volt_max,
+                new_opp->clock_latency_ns);
+
+       /*
+        * Notify the changes in the availability of the operable
+        * frequency/voltage list.
+        */
+       srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+       return 0;
+
+free_opp:
+       _opp_remove(opp_table, new_opp, false);
+unlock:
+       mutex_unlock(&opp_table_lock);
+       return ret;
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+       struct device_node *np;
+       struct opp_table *opp_table;
+       int ret = 0, count = 0;
+
+       mutex_lock(&opp_table_lock);
+
+       opp_table = _managed_opp(opp_np);
+       if (opp_table) {
+               /* OPPs are already managed */
+               if (!_add_opp_dev(dev, opp_table))
+                       ret = -ENOMEM;
+               mutex_unlock(&opp_table_lock);
+               return ret;
+       }
+       mutex_unlock(&opp_table_lock);
+
+       /* We have opp-table node now, iterate over it and add OPPs */
+       for_each_available_child_of_node(opp_np, np) {
+               count++;
+
+               ret = _opp_add_static_v2(dev, np);
+               if (ret) {
+                       dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+                               ret);
+                       goto free_table;
+               }
+       }
+
+       /* There should be one or more OPPs defined */
+       if (WARN_ON(!count))
+               return -ENOENT;
+
+       mutex_lock(&opp_table_lock);
+
+       opp_table = _find_opp_table(dev);
+       if (WARN_ON(IS_ERR(opp_table))) {
+               ret = PTR_ERR(opp_table);
+               mutex_unlock(&opp_table_lock);
+               goto free_table;
+       }
+
+       opp_table->np = opp_np;
+       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+       mutex_unlock(&opp_table_lock);
+
+       return 0;
+
+free_table:
+       dev_pm_opp_of_remove_table(dev);
+
+       return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
+{
+       const struct property *prop;
+       const __be32 *val;
+       int nr;
+
+       prop = of_find_property(dev->of_node, "operating-points", NULL);
+       if (!prop)
+               return -ENODEV;
+       if (!prop->value)
+               return -ENODATA;
+
+       /*
+        * Each OPP is a set of tuples consisting of frequency and
+        * voltage like <freq-kHz vol-uV>.
+        */
+       nr = prop->length / sizeof(u32);
+       if (nr % 2) {
+               dev_err(dev, "%s: Invalid OPP table\n", __func__);
+               return -EINVAL;
+       }
+
+       val = prop->value;
+       while (nr) {
+               unsigned long freq = be32_to_cpup(val++) * 1000;
+               unsigned long volt = be32_to_cpup(val++);
+
+               if (_opp_add_v1(dev, freq, volt, false))
+                       dev_warn(dev, "%s: Failed to add OPP %ld\n",
+                                __func__, freq);
+               nr -= 2;
+       }
+
+       return 0;
+}
+
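A worked example of the v1 format (values illustrative): frequency is in kHz and voltage in uV, hence the * 1000 above. The pair list below describes the same frequency/voltage points as the dynamic registrations that follow (only the dynamic flag differs):

/*
 *	operating-points = <998400 1075000>, <1190400 1200000>;
 */
static int foo_add_v1_equivalents(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 998400 * 1000, 1075000); /* 998.4 MHz, 1.075 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1190400 * 1000, 1200000); /* 1.1904 GHz, 1.2 V */
}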
+/**
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * @dev:       device pointer used to lookup OPP table.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq is the same but volt is different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ * -ENODEV     when 'operating-points' property is not found or is invalid data
+ *             in device node.
+ * -ENODATA    when empty 'operating-points' property is found
+ * -EINVAL     when invalid entries are found in opp-v2 table
+ */
+int dev_pm_opp_of_add_table(struct device *dev)
+{
+       struct device_node *opp_np;
+       int ret;
+
+       /*
+        * OPPs have two versions of bindings now. The older one is deprecated,
+        * try for the new binding first.
+        */
+       opp_np = _of_get_opp_desc_node(dev);
+       if (!opp_np) {
+               /*
+                * Try old-deprecated bindings for backward compatibility with
+                * older dtbs.
+                */
+               return _of_add_opp_table_v1(dev);
+       }
+
+       ret = _of_add_opp_table_v2(dev, opp_np);
+       of_node_put(opp_np);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
+
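A sketch of the usual probe-side pairing (hypothetical bar_* driver; bar_setup() stands in for whatever initialization needs the OPPs):

#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int bar_setup(struct platform_device *pdev);	/* hypothetical */

static int bar_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = dev_pm_opp_of_add_table(dev);	/* tries v2, falls back to v1 */
	if (ret)
		return ret;

	ret = bar_setup(pdev);
	if (ret)
		dev_pm_opp_of_remove_table(dev);	/* undo the static entries */

	return ret;
}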
+/* CPU device specific helpers */
+
+/**
+ * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask:   cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used only to remove static entries created from DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
+{
+       _dev_pm_opp_cpumask_remove_table(cpumask, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
+ * @cpumask:   cpumask for which OPP table needs to be added.
+ *
+ * This adds the OPP tables for CPUs present in the @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
+{
+       struct device *cpu_dev;
+       int cpu, ret = 0;
+
+       WARN_ON(cpumask_empty(cpumask));
+
+       for_each_cpu(cpu, cpumask) {
+               cpu_dev = get_cpu_device(cpu);
+               if (!cpu_dev) {
+                       pr_err("%s: failed to get cpu%d device\n", __func__,
+                              cpu);
+                       continue;
+               }
+
+               ret = dev_pm_opp_of_add_table(cpu_dev);
+               if (ret) {
+                       pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+                              __func__, cpu, ret);
+
+                       /* Free all other OPPs */
+                       dev_pm_opp_of_cpumask_remove_table(cpumask);
+                       break;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
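Sketched from the consumer side (hypothetical call site): a cpufreq driver can bring up the tables for a whole cluster in one call and rely on the helper's cleanup on partial failure:

static int baz_init_cluster(struct cpufreq_policy *policy)
{
	int ret;

	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
	if (ret)
		pr_err("%s: no OPP tables for cpu%d's cluster: %d\n",
		       __func__, policy->cpu, ret);

	return ret;
}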
+/**
+ * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
+ *                                   @cpu_dev using operating-points-v2
+ *                                   bindings.
+ *
+ * @cpu_dev:   CPU device for which we do this operation
+ * @cpumask:   cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
+                                  struct cpumask *cpumask)
+{
+       struct device_node *np, *tmp_np;
+       struct device *tcpu_dev;
+       int cpu, ret = 0;
+
+       /* Get OPP descriptor node */
+       np = _of_get_opp_desc_node(cpu_dev);
+       if (!np) {
+               dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+               return -ENOENT;
+       }
+
+       cpumask_set_cpu(cpu_dev->id, cpumask);
+
+       /* OPPs are shared? */
+       if (!of_property_read_bool(np, "opp-shared"))
+               goto put_cpu_node;
+
+       for_each_possible_cpu(cpu) {
+               if (cpu == cpu_dev->id)
+                       continue;
+
+               tcpu_dev = get_cpu_device(cpu);
+               if (!tcpu_dev) {
+                       dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+                               __func__, cpu);
+                       ret = -ENODEV;
+                       goto put_cpu_node;
+               }
+
+               /* Get OPP descriptor node */
+               tmp_np = _of_get_opp_desc_node(tcpu_dev);
+               if (!tmp_np) {
+                       dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+                               __func__);
+                       ret = -ENOENT;
+                       goto put_cpu_node;
+               }
+
+               /* CPUs are sharing opp node */
+               if (np == tmp_np)
+                       cpumask_set_cpu(cpu, cpumask);
+
+               of_node_put(tmp_np);
+       }
+
+put_cpu_node:
+       of_node_put(np);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
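The two sharing paths combine naturally; a sketch of a hypothetical call site (assuming policy->cpus was pre-populated by the platform for the v1 fallback):

static int qux_mark_sharing(struct device *cpu_dev,
			    struct cpufreq_policy *policy)
{
	int ret;

	/* v2 bindings: sharing is described by "opp-shared" in DT */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (!ret)
		return 0;

	/* v1 bindings: mark the (pre-populated) policy->cpus explicitly */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
}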
index f67f806fcf3ae8f13866336cdc54958bd57f59b9..20f3be22e060f38761c9f949d776b33e7de756f0 100644 (file)
@@ -28,6 +28,8 @@ struct regulator;
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;
 
+extern struct list_head opp_tables;
+
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
@@ -183,6 +185,18 @@ struct opp_table {
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
index 9b1a65debd49e645d41343eac7d54a4369ddc2fd..7f692accdc90ec296c55ba738b2cd7f65426ba26 100644 (file)
@@ -21,7 +21,7 @@
 
 static inline bool is_pset_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_PDATA;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
 }
 
 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
index 02e18182fcb5506da4c4981a0d5f1926376099e6..2beb396fe6523cb332ec369d6a65a1c7bf9d3e25 100644 (file)
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
        } else {
                clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
-               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
                clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
                clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
                clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
index a7f45853c10337c25a6aea4dca90aa7cca5f99b5..b7445b6ae5a4be8c72507705b82ef0a56b37ca3e 100644 (file)
@@ -18,7 +18,11 @@ config CPU_FREQ
 
 if CPU_FREQ
 
+config CPU_FREQ_GOV_ATTR_SET
+       bool
+
 config CPU_FREQ_GOV_COMMON
+       select CPU_FREQ_GOV_ATTR_SET
        select IRQ_WORK
        bool
 
@@ -103,6 +107,17 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+       bool "schedutil"
+       depends on SMP
+       select CPU_FREQ_GOV_SCHEDUTIL
+       select CPU_FREQ_GOV_PERFORMANCE
+       help
+         Use the 'schedutil' CPUFreq governor by default. If unsure,
+         have a look at the help section of that governor. The fallback
+         governor will be 'performance'.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -184,6 +199,26 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_SCHEDUTIL
+       tristate "'schedutil' cpufreq policy governor"
+       depends on CPU_FREQ && SMP
+       select CPU_FREQ_GOV_ATTR_SET
+       select IRQ_WORK
+       help
+         This governor makes decisions based on the utilization data provided
+         by the scheduler.  It sets the CPU frequency to be proportional to
+         the utilization/capacity ratio coming from the scheduler.  If the
+         utilization is frequency-invariant, the new frequency is also
+         proportional to the maximum available frequency.  If that is not the
+         case, it is proportional to the current frequency of the CPU.  The
+         frequency tipping point is at utilization/capacity equal to 80% in
+         both cases.
+
+         To compile this driver as a module, choose M here: the module will
+         be called cpufreq_schedutil.
+
+         If in doubt, say N.
+
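The mapping described in the help text can be sketched as follows (a simplification for illustration, not code from this patch): a tipping point at util/max == 80% is the same as scaling by 1.25.

static unsigned int sketch_next_freq(unsigned int ref_freq,
				     unsigned long util, unsigned long max)
{
	/*
	 * next_freq = 1.25 * ref * util / max; ref is the max frequency if
	 * utilization is frequency-invariant, the current frequency if not.
	 */
	return (ref_freq + (ref_freq >> 2)) * util / max;
}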
 comment "CPU frequency scaling drivers"
 
 config CPUFREQ_DT
@@ -191,6 +226,7 @@ config CPUFREQ_DT
        depends on HAVE_CLK && OF
        # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
        depends on !CPU_THERMAL || THERMAL
+       select CPUFREQ_DT_PLATDEV
        select PM_OPP
        help
          This adds a generic DT based cpufreq driver for frequency management.
@@ -199,6 +235,15 @@ config CPUFREQ_DT
 
          If in doubt, say N.
 
+config CPUFREQ_DT_PLATDEV
+       bool
+       help
+         This adds a generic DT based cpufreq platdev driver for frequency
+         management.  This creates a 'cpufreq-dt' platform device on the
+         supported platforms.
+
+         If in doubt, say N.
+
 if X86
 source "drivers/cpufreq/Kconfig.x86"
 endif
index 14b1f9393b057e106d7b068883ce486ea70239ec..d89b8afe23b6956b6c48e19c87cecc081e7725a1 100644 (file)
@@ -50,15 +50,6 @@ config ARM_HIGHBANK_CPUFREQ
 
          If in doubt, say N.
 
-config ARM_HISI_ACPU_CPUFREQ
-       tristate "Hisilicon ACPU CPUfreq driver"
-       depends on ARCH_HISI && CPUFREQ_DT
-       select PM_OPP
-       help
-         This enables the hisilicon ACPU CPUfreq driver.
-
-         If in doubt, say N.
-
 config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6 cpufreq support"
        depends on ARCH_MXC
index c59bdcb83217071087cc0a90ef62e5cd01ce38d7..adbd1de1cea55cbe6777d3a1c4dab546a6a68adf 100644 (file)
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
+       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
          The driver implements an internal governor and will become
index 9e63fb1b09f815fa70e1c4d908707ba39989f8fa..e1eb11ee35708049bc19063de73e977981251c14 100644 (file)
@@ -11,8 +11,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)    += cpufreq_governor_attr_set.o
 
 obj-$(CONFIG_CPUFREQ_DT)               += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_PLATDEV)       += cpufreq-dt-platdev.o
 
 ##################################################################################
 # x86 drivers.
@@ -53,7 +55,6 @@ obj-$(CONFIG_ARCH_DAVINCI)            += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
-obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)    += hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)           += integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)     += kirkwood-cpufreq.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_ARM_TEGRA20_CPUFREQ)     += tegra20-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)     += tegra124-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7)            += mvebu-cpufreq.o
 
 
 ##################################################################################
index fb5712141040abd750829f6a212c98044ddda52f..32a15052f363f886f0dbd2acc58a8bd48cc5286f 100644 (file)
@@ -25,6 +25,8 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -50,8 +52,6 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
-#define PFX "acpi-cpufreq: "
-
 enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
@@ -65,7 +65,6 @@ enum {
 #define MSR_K7_HWCR_CPB_DIS    (1ULL << 25)
 
 struct acpi_cpufreq_data {
-       struct cpufreq_frequency_table *freq_table;
        unsigned int resume;
        unsigned int cpu_feature;
        unsigned int acpi_perf_cpu;
@@ -200,8 +199,9 @@ static int check_amd_hwpstate_cpu(unsigned int cpuid)
        return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
 }
 
-static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
 {
+       struct acpi_cpufreq_data *data = policy->driver_data;
        struct acpi_processor_performance *perf;
        int i;
 
@@ -209,13 +209,14 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 
        for (i = 0; i < perf->state_count; i++) {
                if (value == perf->states[i].status)
-                       return data->freq_table[i].frequency;
+                       return policy->freq_table[i].frequency;
        }
        return 0;
 }
 
-static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 {
+       struct acpi_cpufreq_data *data = policy->driver_data;
        struct cpufreq_frequency_table *pos;
        struct acpi_processor_performance *perf;
 
@@ -226,20 +227,22 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 
        perf = to_perf_data(data);
 
-       cpufreq_for_each_entry(pos, data->freq_table)
+       cpufreq_for_each_entry(pos, policy->freq_table)
                if (msr == perf->states[pos->driver_data].status)
                        return pos->frequency;
-       return data->freq_table[0].frequency;
+       return policy->freq_table[0].frequency;
 }
 
-static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
 {
+       struct acpi_cpufreq_data *data = policy->driver_data;
+
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
        case SYSTEM_AMD_MSR_CAPABLE:
-               return extract_msr(val, data);
+               return extract_msr(policy, val);
        case SYSTEM_IO_CAPABLE:
-               return extract_io(val, data);
+               return extract_io(policy, val);
        default:
                return 0;
        }
@@ -374,11 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
                return 0;
 
        data = policy->driver_data;
-       if (unlikely(!data || !data->freq_table))
+       if (unlikely(!data || !policy->freq_table))
                return 0;
 
-       cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
-       freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
+       cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+       freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
@@ -392,14 +395,15 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        return freq;
 }
 
-static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
-                               struct acpi_cpufreq_data *data)
+static unsigned int check_freqs(struct cpufreq_policy *policy,
+                               const struct cpumask *mask, unsigned int freq)
 {
+       struct acpi_cpufreq_data *data = policy->driver_data;
        unsigned int cur_freq;
        unsigned int i;
 
        for (i = 0; i < 100; i++) {
-               cur_freq = extract_freq(get_cur_val(mask, data), data);
+               cur_freq = extract_freq(policy, get_cur_val(mask, data));
                if (cur_freq == freq)
                        return 1;
                udelay(10);
@@ -416,12 +420,12 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int next_perf_state = 0; /* Index into perf table */
        int result = 0;
 
-       if (unlikely(data == NULL || data->freq_table == NULL)) {
+       if (unlikely(!data)) {
                return -ENODEV;
        }
 
        perf = to_perf_data(data);
-       next_perf_state = data->freq_table[index].driver_data;
+       next_perf_state = policy->freq_table[index].driver_data;
        if (perf->state == next_perf_state) {
                if (unlikely(data->resume)) {
                        pr_debug("Called after resume, resetting to P%d\n",
@@ -444,8 +448,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        drv_write(data, mask, perf->states[next_perf_state].control);
 
        if (acpi_pstate_strict) {
-               if (!check_freqs(mask, data->freq_table[index].frequency,
-                                       data)) {
+               if (!check_freqs(policy, mask,
+                                policy->freq_table[index].frequency)) {
                        pr_debug("acpi_cpufreq_target failed (%d)\n",
                                policy->cpu);
                        result = -EAGAIN;
@@ -458,6 +462,43 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        return result;
 }
 
+unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+                                     unsigned int target_freq)
+{
+       struct acpi_cpufreq_data *data = policy->driver_data;
+       struct acpi_processor_performance *perf;
+       struct cpufreq_frequency_table *entry;
+       unsigned int next_perf_state, next_freq, freq;
+
+       /*
+        * Find the closest frequency at or above target_freq.
+        *
+        * The table is sorted in the reverse order with respect to the
+        * frequency and all of the entries are valid (see the initialization).
+        */
+       entry = policy->freq_table;
+       do {
+               entry++;
+               freq = entry->frequency;
+       } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
+       entry--;
+       next_freq = entry->frequency;
+       next_perf_state = entry->driver_data;
+
+       perf = to_perf_data(data);
+       if (perf->state == next_perf_state) {
+               if (unlikely(data->resume))
+                       data->resume = 0;
+               else
+                       return next_freq;
+       }
+
+       data->cpu_freq_write(&perf->control_register,
+                            perf->states[next_perf_state].control);
+       perf->state = next_perf_state;
+       return next_freq;
+}
+
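
The new fast-switch path leans entirely on the invariant stated in the comment: entries sorted by descending frequency, all valid, sentinel-terminated. A standalone model of that table walk (plain C, not kernel code; TABLE_END stands in for CPUFREQ_TABLE_END and the sample table is invented), showing how it yields the lowest frequency at or above the target, with the table cap and floor as edge cases:

#include <assert.h>

#define TABLE_END (~0u)                 /* stand-in for CPUFREQ_TABLE_END */

struct freq_entry {
        unsigned int driver_data;       /* P-state index */
        unsigned int frequency;         /* kHz, sorted descending */
};

static const struct freq_entry *pick_entry(const struct freq_entry *entry,
                                           unsigned int target_freq)
{
        unsigned int freq;

        /* Walk past every entry still satisfying freq >= target_freq,
         * then step back to the last (i.e. lowest) one that did. */
        do {
                entry++;
                freq = entry->frequency;
        } while (freq >= target_freq && freq != TABLE_END);
        return entry - 1;
}

int main(void)
{
        const struct freq_entry table[] = {
                { 0, 3000000 }, { 1, 2400000 }, { 2, 1800000 }, { 3, TABLE_END },
        };

        assert(pick_entry(table, 2000000)->frequency == 2400000);
        assert(pick_entry(table, 1000000)->frequency == 1800000); /* floor */
        assert(pick_entry(table, 9000000)->frequency == 3000000); /* cap */
        return 0;
}
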
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
@@ -611,10 +652,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
                    (c->x86_mask == 8)) {
-                       printk(KERN_INFO "acpi-cpufreq: Intel(R) "
-                           "Xeon(R) 7100 Errata AL30, processors may "
-                           "lock up on frequency changes: disabling "
-                           "acpi-cpufreq.\n");
+                       pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
                        return -ENODEV;
                    }
                }
@@ -631,6 +669,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        unsigned int result = 0;
        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
        struct acpi_processor_performance *perf;
+       struct cpufreq_frequency_table *freq_table;
 #ifdef CONFIG_SMP
        static int blacklisted;
 #endif
@@ -690,7 +729,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                cpumask_copy(data->freqdomain_cpus,
                             topology_sibling_cpumask(cpu));
                policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
-               pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+               pr_info_once("overriding BIOS provided _PSD data\n");
        }
 #endif
 
@@ -742,9 +781,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_unreg;
        }
 
-       data->freq_table = kzalloc(sizeof(*data->freq_table) *
+       freq_table = kzalloc(sizeof(*freq_table) *
                    (perf->state_count+1), GFP_KERNEL);
-       if (!data->freq_table) {
+       if (!freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }
@@ -762,30 +801,29 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
            policy->cpuinfo.transition_latency > 20 * 1000) {
                policy->cpuinfo.transition_latency = 20 * 1000;
-               printk_once(KERN_INFO
-                           "P-state transition latency capped at 20 uS\n");
+               pr_info_once("P-state transition latency capped at 20 us\n");
        }
 
        /* table init */
        for (i = 0; i < perf->state_count; i++) {
                if (i > 0 && perf->states[i].core_frequency >=
-                   data->freq_table[valid_states-1].frequency / 1000)
+                   freq_table[valid_states-1].frequency / 1000)
                        continue;
 
-               data->freq_table[valid_states].driver_data = i;
-               data->freq_table[valid_states].frequency =
+               freq_table[valid_states].driver_data = i;
+               freq_table[valid_states].frequency =
                    perf->states[i].core_frequency * 1000;
                valid_states++;
        }
-       data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+       freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
        perf->state = 0;
 
-       result = cpufreq_table_validate_and_show(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, freq_table);
        if (result)
                goto err_freqfree;
 
        if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
-               printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+               pr_warn(FW_WARN "P-state 0 is not max freq\n");
 
        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
@@ -821,10 +859,13 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         */
        data->resume = 1;
 
+       policy->fast_switch_possible = !acpi_pstate_strict &&
+               !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+
        return result;
 
 err_freqfree:
-       kfree(data->freq_table);
+       kfree(freq_table);
 err_unreg:
        acpi_processor_unregister_performance(cpu);
 err_free_mask:
@@ -842,13 +883,12 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
-       if (data) {
-               policy->driver_data = NULL;
-               acpi_processor_unregister_performance(data->acpi_perf_cpu);
-               free_cpumask_var(data->freqdomain_cpus);
-               kfree(data->freq_table);
-               kfree(data);
-       }
+       policy->fast_switch_possible = false;
+       policy->driver_data = NULL;
+       acpi_processor_unregister_performance(data->acpi_perf_cpu);
+       free_cpumask_var(data->freqdomain_cpus);
+       kfree(policy->freq_table);
+       kfree(data);
 
        return 0;
 }
@@ -876,6 +916,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
 static struct cpufreq_driver acpi_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = acpi_cpufreq_target,
+       .fast_switch    = acpi_cpufreq_fast_switch,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
index c251247ae6613e860164627d8c8161f2b0d0cfe5..418042201e6da9e501ac6fb161437b117491b3ff 100644 (file)
@@ -298,7 +298,8 @@ static int merge_cluster_tables(void)
        return 0;
 }
 
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                           const struct cpumask *cpumask)
 {
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
 
@@ -308,11 +309,12 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
        clk_put(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (arm_bL_ops->free_opp_table)
-               arm_bL_ops->free_opp_table(cpu_dev);
+               arm_bL_ops->free_opp_table(cpumask);
        dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
 }
 
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                          const struct cpumask *cpumask)
 {
        u32 cluster = cpu_to_cluster(cpu_dev->id);
        int i;
@@ -321,7 +323,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
                return;
 
        if (cluster < MAX_CLUSTERS)
-               return _put_cluster_clk_and_freq_table(cpu_dev);
+               return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
 
        for_each_present_cpu(i) {
                struct device *cdev = get_cpu_device(i);
@@ -330,14 +332,15 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
                        return;
                }
 
-               _put_cluster_clk_and_freq_table(cdev);
+               _put_cluster_clk_and_freq_table(cdev, cpumask);
        }
 
        /* free virtual table */
        kfree(freq_table[cluster]);
 }
 
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                          const struct cpumask *cpumask)
 {
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
        int ret;
@@ -345,7 +348,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
        if (freq_table[cluster])
                return 0;
 
-       ret = arm_bL_ops->init_opp_table(cpu_dev);
+       ret = arm_bL_ops->init_opp_table(cpumask);
        if (ret) {
                dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
@@ -374,14 +377,15 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
 
 free_opp_table:
        if (arm_bL_ops->free_opp_table)
-               arm_bL_ops->free_opp_table(cpu_dev);
+               arm_bL_ops->free_opp_table(cpumask);
 out:
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                        cluster);
        return ret;
 }
 
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                         const struct cpumask *cpumask)
 {
        u32 cluster = cpu_to_cluster(cpu_dev->id);
        int i, ret;
@@ -390,7 +394,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
                return 0;
 
        if (cluster < MAX_CLUSTERS) {
-               ret = _get_cluster_clk_and_freq_table(cpu_dev);
+               ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
                if (ret)
                        atomic_dec(&cluster_usage[cluster]);
                return ret;
@@ -407,7 +411,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
                        return -ENODEV;
                }
 
-               ret = _get_cluster_clk_and_freq_table(cdev);
+               ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
                if (ret)
                        goto put_clusters;
        }
@@ -433,7 +437,7 @@ put_clusters:
                        return -ENODEV;
                }
 
-               _put_cluster_clk_and_freq_table(cdev);
+               _put_cluster_clk_and_freq_table(cdev, cpumask);
        }
 
        atomic_dec(&cluster_usage[cluster]);
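
For orientation, the get/put pairs in this file implement per-cluster reference counting: the first user sets up the cluster's clock and frequency table, the last user frees them, and a failed setup undoes the count bump (the atomic_dec() just above). A single-threaded standalone model of that pattern (plain C; the kernel code uses atomic_inc_return()/atomic_dec_return() on cluster_usage[]):

#include <assert.h>
#include <stdbool.h>

#define MAX_CLUSTERS 2

static int cluster_usage[MAX_CLUSTERS];         /* atomic_t in the kernel */
static bool table_ready[MAX_CLUSTERS];

/* First user initializes; a failed init rolls the count back. */
static int get_cluster(int cluster, bool init_ok)
{
        if (++cluster_usage[cluster] != 1)
                return 0;               /* someone else already set it up */
        if (!init_ok) {
                --cluster_usage[cluster];
                return -1;
        }
        table_ready[cluster] = true;
        return 0;
}

/* Last user tears the cluster state down. */
static void put_cluster(int cluster)
{
        if (--cluster_usage[cluster] == 0)
                table_ready[cluster] = false;
}

int main(void)
{
        assert(get_cluster(0, true) == 0 && table_ready[0]);
        assert(get_cluster(0, true) == 0);      /* second user, no re-init */
        put_cluster(0);
        assert(table_ready[0]);                 /* one user still holds it */
        put_cluster(0);
        assert(!table_ready[0]);                /* freed with the last put */
        assert(get_cluster(1, false) == -1);    /* failed init rolls back */
        assert(cluster_usage[1] == 0);
        return 0;
}
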
@@ -455,18 +459,6 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       ret = get_cluster_clk_and_freq_table(cpu_dev);
-       if (ret)
-               return ret;
-
-       ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
-       if (ret) {
-               dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
-                               policy->cpu, cur_cluster);
-               put_cluster_clk_and_freq_table(cpu_dev);
-               return ret;
-       }
-
        if (cur_cluster < MAX_CLUSTERS) {
                int cpu;
 
@@ -479,6 +471,18 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
                per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
        }
 
+       ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+       if (ret)
+               return ret;
+
+       ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+       if (ret) {
+               dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+                       policy->cpu, cur_cluster);
+               put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+               return ret;
+       }
+
        if (arm_bL_ops->get_transition_latency)
                policy->cpuinfo.transition_latency =
                        arm_bL_ops->get_transition_latency(cpu_dev);
@@ -509,7 +513,7 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       put_cluster_clk_and_freq_table(cpu_dev);
+       put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
        return 0;
index b88889d9387ed5d5d3183c6d19b7fc2c8208d230..184d7c3a112a8ca45e652f4c519ef506144db5fc 100644 (file)
@@ -30,11 +30,11 @@ struct cpufreq_arm_bL_ops {
         * This must set opp table for cpu_dev in a similar way as done by
         * dev_pm_opp_of_add_table().
         */
-       int (*init_opp_table)(struct device *cpu_dev);
+       int (*init_opp_table)(const struct cpumask *cpumask);
 
        /* Optional */
        int (*get_transition_latency)(struct device *cpu_dev);
-       void (*free_opp_table)(struct device *cpu_dev);
+       void (*free_opp_table)(const struct cpumask *cpumask);
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
index 16ddeefe94433a4edb369636131a4bd69912ccf7..39b3f51d9a30a971eebccc1e98c5d679ecafa500 100644 (file)
@@ -43,23 +43,6 @@ static struct device_node *get_cpu_node_with_valid_op(int cpu)
        return np;
 }
 
-static int dt_init_opp_table(struct device *cpu_dev)
-{
-       struct device_node *np;
-       int ret;
-
-       np = of_node_get(cpu_dev->of_node);
-       if (!np) {
-               pr_err("failed to find cpu%d node\n", cpu_dev->id);
-               return -ENOENT;
-       }
-
-       ret = dev_pm_opp_of_add_table(cpu_dev);
-       of_node_put(np);
-
-       return ret;
-}
-
 static int dt_get_transition_latency(struct device *cpu_dev)
 {
        struct device_node *np;
@@ -81,8 +64,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
        .name   = "dt-bl",
        .get_transition_latency = dt_get_transition_latency,
-       .init_opp_table = dt_init_opp_table,
-       .free_opp_table = dev_pm_opp_of_remove_table,
+       .init_opp_table = dev_pm_opp_of_cpumask_add_table,
+       .free_opp_table = dev_pm_opp_of_cpumask_remove_table,
 };
 
 static int generic_bL_probe(struct platform_device *pdev)
index 7c0bdfb1a2ca47ea9dcd0b36ef892274edd929b4..8882b8e2ecd0d0fe82ddd8cbb1bf8c4e29f4b886 100644 (file)
@@ -173,4 +173,25 @@ out:
        return -ENODEV;
 }
 
+static void __exit cppc_cpufreq_exit(void)
+{
+       struct cpudata *cpu;
+       int i;
+
+       cpufreq_unregister_driver(&cppc_cpufreq_driver);
+
+       for_each_possible_cpu(i) {
+               cpu = all_cpu_data[i];
+               free_cpumask_var(cpu->shared_cpu_map);
+               kfree(cpu);
+       }
+
+       kfree(all_cpu_data);
+}
+
+module_exit(cppc_cpufreq_exit);
+MODULE_AUTHOR("Ashwin Chaugule");
+MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
+MODULE_LICENSE("GPL");
+
 late_initcall(cppc_cpufreq_init);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
new file mode 100644 (file)
index 0000000..3646b14
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+       { .compatible = "allwinner,sun4i-a10", },
+       { .compatible = "allwinner,sun5i-a10s", },
+       { .compatible = "allwinner,sun5i-a13", },
+       { .compatible = "allwinner,sun5i-r8", },
+       { .compatible = "allwinner,sun6i-a31", },
+       { .compatible = "allwinner,sun6i-a31s", },
+       { .compatible = "allwinner,sun7i-a20", },
+       { .compatible = "allwinner,sun8i-a23", },
+       { .compatible = "allwinner,sun8i-a33", },
+       { .compatible = "allwinner,sun8i-a83t", },
+       { .compatible = "allwinner,sun8i-h3", },
+
+       { .compatible = "hisilicon,hi6220", },
+
+       { .compatible = "fsl,imx27", },
+       { .compatible = "fsl,imx51", },
+       { .compatible = "fsl,imx53", },
+       { .compatible = "fsl,imx7d", },
+
+       { .compatible = "marvell,berlin", },
+
+       { .compatible = "samsung,exynos3250", },
+       { .compatible = "samsung,exynos4210", },
+       { .compatible = "samsung,exynos4212", },
+       { .compatible = "samsung,exynos4412", },
+       { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+       { .compatible = "samsung,exynos5420", },
+       { .compatible = "samsung,exynos5800", },
+#endif
+
+       { .compatible = "renesas,emev2", },
+       { .compatible = "renesas,r7s72100", },
+       { .compatible = "renesas,r8a73a4", },
+       { .compatible = "renesas,r8a7740", },
+       { .compatible = "renesas,r8a7778", },
+       { .compatible = "renesas,r8a7779", },
+       { .compatible = "renesas,r8a7790", },
+       { .compatible = "renesas,r8a7791", },
+       { .compatible = "renesas,r8a7793", },
+       { .compatible = "renesas,r8a7794", },
+       { .compatible = "renesas,sh73a0", },
+
+       { .compatible = "rockchip,rk2928", },
+       { .compatible = "rockchip,rk3036", },
+       { .compatible = "rockchip,rk3066a", },
+       { .compatible = "rockchip,rk3066b", },
+       { .compatible = "rockchip,rk3188", },
+       { .compatible = "rockchip,rk3228", },
+       { .compatible = "rockchip,rk3288", },
+       { .compatible = "rockchip,rk3366", },
+       { .compatible = "rockchip,rk3368", },
+       { .compatible = "rockchip,rk3399", },
+
+       { .compatible = "sigma,tango4" },
+
+       { .compatible = "ti,omap2", },
+       { .compatible = "ti,omap3", },
+       { .compatible = "ti,omap4", },
+       { .compatible = "ti,omap5", },
+
+       { .compatible = "xlnx,zynq-7000", },
+};
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+       struct device_node *np = of_find_node_by_path("/");
+
+       if (!np)
+               return -ENODEV;
+
+       if (!of_match_node(machines, np)) {
+               of_node_put(np);
+               return -ENODEV;
+       }
+
+       of_node_put(np);
+
+       return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
+                                                              NULL, 0));
+}
+device_initcall(cpufreq_dt_platdev_init);
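
The init function above only creates the "cpufreq-dt" platform device when one of the root node's compatible strings appears in the allowlist. A standalone model of that check (plain C, not kernel code; the board's compatible strings are invented and of_match_node()'s match scoring is ignored):

#include <stdio.h>
#include <string.h>

/* Hypothetical root-node compatibles, most to least specific. */
static const char *const root_compat[] = { "acme,superboard", "ti,omap4", NULL };

static const char *const machines[] = {
        "samsung,exynos5250", "ti,omap4", "xlnx,zynq-7000", NULL,
};

/* Model of of_match_node(): is any root compatible in the table? */
static int machine_supported(const char *const *compat)
{
        const char *const *m;

        for (; *compat; compat++)
                for (m = machines; *m; m++)
                        if (!strcmp(*compat, *m))
                                return 1;
        return 0;
}

int main(void)
{
        if (machine_supported(root_compat))
                printf("register cpufreq-dt platform device\n");
        return 0;
}
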
index 5f8dbe640a202baa2b12d26267ca78b983c8b7fc..3957de801ae8260770051bc07a0fc391ea45c9c1 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -147,7 +146,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        struct clk *cpu_clk;
        struct dev_pm_opp *suspend_opp;
        unsigned int transition_latency;
-       bool opp_v1 = false;
+       bool fallback = false;
        const char *name;
        int ret;
 
@@ -167,14 +166,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
+               if (ret != -ENOENT)
+                       goto out_put_clk;
+
                /*
                 * operating-points-v2 not supported, fallback to old method of
-                * finding shared-OPPs for backward compatibility.
+                * finding shared-OPPs for backward compatibility if the
+                * platform hasn't set sharing CPUs.
                 */
-               if (ret == -ENOENT)
-                       opp_v1 = true;
-               else
-                       goto out_put_clk;
+               if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
+                       fallback = true;
        }
 
        /*
@@ -214,11 +215,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                goto out_free_opp;
        }
 
-       if (opp_v1) {
-               struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
-
-               if (!pd || !pd->independent_clocks)
-                       cpumask_setall(policy->cpus);
+       if (fallback) {
+               cpumask_setall(policy->cpus);
 
                /*
                 * OPP tables are initialized only for policy->cpu, do it for
index db69eeb501a7d49514d7ddd255c65d2e96e4bbeb..5503d491b0160f39be799469aef3fd0d5bc60e72 100644 (file)
@@ -7,6 +7,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -56,8 +58,6 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
 MODULE_PARM_DESC(min_fsb,
                "Minimum FSB to use, if not defined: current FSB - 50");
 
-#define PFX "cpufreq-nforce2: "
-
 /**
  * nforce2_calc_fsb - calculate FSB
  * @pll: PLL value
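
Worth spelling out for the conversion below: with pr_fmt() defined this way, every pr_err()/pr_info() call in the file picks up the module-name prefix automatically, which is what lets the old PFX macro go. A userspace mock of the mechanism (printf stands in for printk and the log-level prefix is simplified):

#include <stdio.h>

#define KBUILD_MODNAME "cpufreq_nforce2"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* In the kernel, pr_err(fmt, ...) is printk(KERN_ERR pr_fmt(fmt), ...). */
#define pr_err(fmt, ...) printf("err: " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        /* prints: err: cpufreq_nforce2: FSB 250 is out of range! */
        pr_err("FSB %d is out of range!\n", 250);
        return 0;
}
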
@@ -174,13 +174,13 @@ static int nforce2_set_fsb(unsigned int fsb)
        int pll = 0;
 
        if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
-               printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
+               pr_err("FSB %d is out of range!\n", fsb);
                return -EINVAL;
        }
 
        tfsb = nforce2_fsb_read(0);
        if (!tfsb) {
-               printk(KERN_ERR PFX "Error while reading the FSB\n");
+               pr_err("Error while reading the FSB\n");
                return -EINVAL;
        }
 
@@ -276,8 +276,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
        /* local_irq_save(flags); */
 
        if (nforce2_set_fsb(target_fsb) < 0)
-               printk(KERN_ERR PFX "Changing FSB to %d failed\n",
-                       target_fsb);
+               pr_err("Changing FSB to %d failed\n", target_fsb);
        else
                pr_debug("Changed FSB successfully to %d\n",
                        target_fsb);
@@ -325,8 +324,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
        /* FIX: Get FID from CPU */
        if (!fid) {
                if (!cpu_khz) {
-                       printk(KERN_WARNING PFX
-                       "cpu_khz not set, can't calculate multiplier!\n");
+                       pr_warn("cpu_khz not set, can't calculate multiplier!\n");
                        return -ENODEV;
                }
 
@@ -341,8 +339,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
                }
        }
 
-       printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
-              fid / 10, fid % 10);
+       pr_info("FSB currently at %i MHz, FID %d.%d\n",
+               fsb, fid / 10, fid % 10);
 
        /* Set maximum FSB to FSB at boot time */
        max_fsb = nforce2_fsb_read(1);
@@ -401,11 +399,9 @@ static int nforce2_detect_chipset(void)
        if (nforce2_dev == NULL)
                return -ENODEV;
 
-       printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
-              nforce2_dev->revision);
-       printk(KERN_INFO PFX
-              "FSB changing is maybe unstable and can lead to "
-              "crashes and data loss.\n");
+       pr_info("Detected nForce2 chipset revision %X\n",
+               nforce2_dev->revision);
+       pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n");
 
        return 0;
 }
@@ -423,7 +419,7 @@ static int __init nforce2_init(void)
 
        /* detect chipset */
        if (nforce2_detect_chipset()) {
-               printk(KERN_INFO PFX "No nForce2 chipset.\n");
+               pr_info("No nForce2 chipset\n");
                return -ENODEV;
        }
 
index e93405f0eac46565d18c25873d1113c3a1aa65fd..035513b012eebf049cffbe713680c556993f944f 100644 (file)
@@ -78,6 +78,11 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
 
+static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+       return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
 /**
  * Two notifier lists: the "policy" list is involved in the
  * validation process for a new CPU frequency policy; the
@@ -429,6 +434,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
+/*
+ * Fast frequency switching status count.  Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+       struct notifier_block *nb;
+
+       pr_info("Registered transition notifiers:\n");
+
+       mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+       for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+               pr_info("%pF\n", nb->notifier_call);
+
+       mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers.  Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+       lockdep_assert_held(&policy->rwsem);
+
+       if (!policy->fast_switch_possible)
+               return;
+
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (cpufreq_fast_switch_count >= 0) {
+               cpufreq_fast_switch_count++;
+               policy->fast_switch_enabled = true;
+       } else {
+               pr_warn("CPU%u: Fast frequency switching not enabled\n",
+                       policy->cpu);
+               cpufreq_list_transition_notifiers();
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (policy->fast_switch_enabled) {
+               policy->fast_switch_enabled = false;
+               if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+                       cpufreq_fast_switch_count--;
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
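
The tri-state counter above encodes mutual exclusion between fast switching and transition notifiers: enabling fast switching for a policy pushes the count positive, registering a transition notifier (see cpufreq_register_notifier() further below) pushes it negative, and each side refuses to proceed while the other holds the count. A single-threaded standalone model of the protocol (plain C; the kernel additionally serializes everything with cpufreq_fast_switch_lock):

#include <assert.h>
#include <stdbool.h>

/* > 0: fast switching enabled; < 0: notifiers registered; 0: undecided. */
static int fast_switch_count;

static bool enable_fast_switch(void)
{
        if (fast_switch_count < 0)
                return false;           /* a transition notifier exists */
        fast_switch_count++;
        return true;
}

static void disable_fast_switch(void)
{
        assert(fast_switch_count > 0);
        fast_switch_count--;
}

static bool register_transition_notifier(void)
{
        if (fast_switch_count > 0)
                return false;           /* fast switching already in use */
        fast_switch_count--;
        return true;
}

static void unregister_transition_notifier(void)
{
        assert(fast_switch_count < 0);
        fast_switch_count++;
}

int main(void)
{
        assert(register_transition_notifier());
        assert(!enable_fast_switch());           /* blocked by the notifier */
        unregister_transition_notifier();
        assert(enable_fast_switch());            /* count is now +1 */
        assert(!register_transition_notifier()); /* blocked by fast switch */
        disable_fast_switch();
        return 0;
}
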
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
@@ -1248,26 +1320,24 @@ out_free_policy:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
+       struct cpufreq_policy *policy;
        unsigned cpu = dev->id;
-       int ret;
 
        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
 
-       if (cpu_online(cpu)) {
-               ret = cpufreq_online(cpu);
-       } else {
-               /*
-                * A hotplug notifier will follow and we will handle it as CPU
-                * online then.  For now, just create the sysfs link, unless
-                * there is no policy or the link is already present.
-                */
-               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (cpu_online(cpu))
+               return cpufreq_online(cpu);
 
-               ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-                       ? add_cpu_dev_symlink(policy, cpu) : 0;
-       }
+       /*
+        * A hotplug notifier will follow and we will handle it as CPU online
+        * then.  For now, just create the sysfs link, unless there is no policy
+        * or the link is already present.
+        */
+       policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+               return 0;
 
-       return ret;
+       return add_cpu_dev_symlink(policy, cpu);
 }
 
 static void cpufreq_offline(unsigned int cpu)
@@ -1319,7 +1389,7 @@ static void cpufreq_offline(unsigned int cpu)
 
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_exit_governor(policy);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }
@@ -1447,8 +1517,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 
        ret_freq = cpufreq_driver->get(policy->cpu);
 
-       /* Updating inactive policies is invalid, so avoid doing that. */
-       if (unlikely(policy_is_inactive(policy)))
+       /*
+        * Updating inactive policies is invalid, so avoid doing that.  Also
+        * if fast frequency switching is used with the given policy, the check
+        * against policy->cur is pointless, so skip it in that case too.
+        */
+       if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
                return ret_freq;
 
        if (ret_freq && policy->cur &&
@@ -1557,21 +1631,25 @@ void cpufreq_suspend(void)
        if (!cpufreq_driver)
                return;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
        for_each_active_policy(policy) {
-               down_write(&policy->rwsem);
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               up_write(&policy->rwsem);
+               if (has_target()) {
+                       down_write(&policy->rwsem);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                       up_write(&policy->rwsem);
 
-               if (ret)
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->suspend
-                   && cpufreq_driver->suspend(policy))
+                       if (ret) {
+                               pr_err("%s: Failed to stop governor for policy: %p\n",
+                                       __func__, policy);
+                               continue;
+                       }
+               }
+
+               if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }
@@ -1596,7 +1674,7 @@ void cpufreq_resume(void)
 
        cpufreq_suspended = false;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->resume)
                return;
 
        pr_debug("%s: Resuming Governors\n", __func__);
@@ -1605,7 +1683,7 @@ void cpufreq_resume(void)
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
-               } else {
+               } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);
@@ -1675,8 +1753,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
+
+               if (cpufreq_fast_switch_count > 0) {
+                       mutex_unlock(&cpufreq_fast_switch_lock);
+                       return -EBUSY;
+               }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret)
+                       cpufreq_fast_switch_count--;
+
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
@@ -1709,8 +1797,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
+
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+                       cpufreq_fast_switch_count++;
+
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
@@ -1729,6 +1823,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+                                       unsigned int target_freq)
+{
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+       return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
@@ -2104,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
                        return ret;
                }
 
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_exit_governor(policy);
                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
@@ -2121,7 +2246,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
                        pr_debug("cpufreq: governor change\n");
                        return 0;
                }
-               cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               cpufreq_exit_governor(policy);
        }
 
        /* new governor failed, so re-start old one */
@@ -2189,16 +2314,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
                cpufreq_online(cpu);
                break;
 
        case CPU_DOWN_PREPARE:
                cpufreq_offline(cpu);
                break;
-
-       case CPU_DOWN_FAILED:
-               cpufreq_online(cpu);
-               break;
        }
        return NOTIFY_OK;
 }
index bf4913f6453b1b26f0b71485ba59896bd4f62fe0..316df247e00da306a8e4c3bb4c90aa1507a85147 100644 (file)
@@ -129,9 +129,10 @@ static struct notifier_block cs_cpufreq_notifier_block = {
 /************************** sysfs interface ************************/
 static struct dbs_governor cs_dbs_gov;
 
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
-               const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+                                         const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
@@ -143,9 +144,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
        return count;
 }
 
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+                                 const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
@@ -158,9 +160,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
        return count;
 }
 
-static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+                                   const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
@@ -175,9 +178,10 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
        return count;
 }
 
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
-               const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+                                     const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;
 
@@ -199,9 +203,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
        return count;
 }
 
-static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+                              size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
index 5f1147fa9239cdc325eb8fae871d151a4b36337f..be498d56dd6971d10d4e46b23c814c6f10a5de72 100644 (file)
@@ -43,9 +43,10 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
  * This must be called with dbs_data->mutex held, otherwise traversing
  * policy_dbs_list isn't safe.
  */
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;
@@ -59,7 +60,7 @@ ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+       list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                mutex_lock(&policy_dbs->timer_mutex);
                /*
                 * On 32-bit architectures this may race with the
@@ -96,13 +97,13 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
 {
        struct policy_dbs_info *policy_dbs;
 
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+       list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
                unsigned int j;
 
                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
 
-                       j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+                       j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -111,54 +112,6 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
 }
 EXPORT_SYMBOL_GPL(gov_update_cpu_data);
 
-static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
-{
-       return container_of(kobj, struct dbs_data, kobj);
-}
-
-static inline struct governor_attr *to_gov_attr(struct attribute *attr)
-{
-       return container_of(attr, struct governor_attr, attr);
-}
-
-static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
-                            char *buf)
-{
-       struct dbs_data *dbs_data = to_dbs_data(kobj);
-       struct governor_attr *gattr = to_gov_attr(attr);
-
-       return gattr->show(dbs_data, buf);
-}
-
-static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct dbs_data *dbs_data = to_dbs_data(kobj);
-       struct governor_attr *gattr = to_gov_attr(attr);
-       int ret = -EBUSY;
-
-       mutex_lock(&dbs_data->mutex);
-
-       if (dbs_data->usage_count)
-               ret = gattr->store(dbs_data, buf, count);
-
-       mutex_unlock(&dbs_data->mutex);
-
-       return ret;
-}
-
-/*
- * Sysfs Ops for accessing governor attributes.
- *
- * All show/store invocations for governor specific sysfs attributes, will first
- * call the below show/store callbacks and the attribute specific callback will
- * be called from within it.
- */
-static const struct sysfs_ops governor_sysfs_ops = {
-       .show   = governor_show,
-       .store  = governor_store,
-};
-
 unsigned int dbs_update(struct cpufreq_policy *policy)
 {
        struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -184,14 +137,14 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
-               u64 cur_wall_time, cur_idle_time;
-               unsigned int idle_time, wall_time;
+               u64 update_time, cur_idle_time;
+               unsigned int idle_time, time_elapsed;
                unsigned int load;
 
-               cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
+               cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
 
-               wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
-               j_cdbs->prev_cpu_wall = cur_wall_time;
+               time_elapsed = update_time - j_cdbs->prev_update_time;
+               j_cdbs->prev_update_time = update_time;
 
                idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_cpu_idle = cur_idle_time;
@@ -203,47 +156,62 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
                        j_cdbs->prev_cpu_nice = cur_nice;
                }
 
-               if (unlikely(!wall_time || wall_time < idle_time))
-                       continue;
-
-               /*
-                * If the CPU had gone completely idle, and a task just woke up
-                * on this CPU now, it would be unfair to calculate 'load' the
-                * usual way for this elapsed time-window, because it will show
-                * near-zero load, irrespective of how CPU intensive that task
-                * actually is. This is undesirable for latency-sensitive bursty
-                * workloads.
-                *
-                * To avoid this, we reuse the 'load' from the previous
-                * time-window and give this task a chance to start with a
-                * reasonably high CPU frequency. (However, we shouldn't over-do
-                * this copy, lest we get stuck at a high load (high frequency)
-                * for too long, even when the current system load has actually
-                * dropped down. So we perform the copy only once, upon the
-                * first wake-up from idle.)
-                *
-                * Detecting this situation is easy: the governor's utilization
-                * update handler would not have run during CPU-idle periods.
-                * Hence, an unusually large 'wall_time' (as compared to the
-                * sampling rate) indicates this scenario.
-                *
-                * prev_load can be zero in two cases and we must recalculate it
-                * for both cases:
-                * - during long idle intervals
-                * - explicitly set to zero
-                */
-               if (unlikely(wall_time > (2 * sampling_rate) &&
-                            j_cdbs->prev_load)) {
+               if (unlikely(!time_elapsed)) {
+                       /*
+                        * That can only happen when this function is called
+                        * twice in a row with a very short interval between the
+                        * calls, so the previous load value can be used then.
+                        */
                        load = j_cdbs->prev_load;
-
+               } else if (unlikely(time_elapsed > 2 * sampling_rate &&
+                                   j_cdbs->prev_load)) {
                        /*
-                        * Perform a destructive copy, to ensure that we copy
-                        * the previous load only once, upon the first wake-up
-                        * from idle.
+                        * If the CPU had gone completely idle and a task has
+                        * just woken up on this CPU now, it would be unfair to
+                        * calculate 'load' the usual way for this elapsed
+                        * time-window, because it would show near-zero load,
+                        * irrespective of how CPU intensive that task actually
+                        * was. This is undesirable for latency-sensitive bursty
+                        * workloads.
+                        *
+                        * To avoid this, reuse the 'load' from the previous
+                        * time-window and give this task a chance to start with
+                        * a reasonably high CPU frequency. However, that
+                        * shouldn't be over-done, lest we get stuck at a high
+                        * load (high frequency) for too long, even when the
+                        * current system load has actually dropped down, so
+                        * clear prev_load to guarantee that the load will be
+                        * computed again next time.
+                        *
+                        * Detecting this situation is easy: the governor's
+                        * utilization update handler would not have run during
+                        * CPU-idle periods.  Hence, an unusually large
+                        * 'time_elapsed' (as compared to the sampling rate)
+                        * indicates this scenario.
                         */
+                       load = j_cdbs->prev_load;
                        j_cdbs->prev_load = 0;
                } else {
-                       load = 100 * (wall_time - idle_time) / wall_time;
+                       if (time_elapsed >= idle_time) {
+                               load = 100 * (time_elapsed - idle_time) / time_elapsed;
+                       } else {
+                               /*
+                                * That can happen if idle_time is returned by
+                                * get_cpu_idle_time_jiffy().  In that case
+                                * idle_time is roughly equal to the difference
+                                * between time_elapsed and "busy time" obtained
+                                * from CPU statistics.  Then, the "busy time"
+                                * can end up being greater than time_elapsed
+                                * (for example, if jiffies_64 and the CPU
+                                * statistics are updated by different CPUs),
+                                * so idle_time may in fact be negative.  That
+                                * means, though, that the CPU was busy all
+                                * the time (on the rough average) during the
+                                * last sampling interval and 100 can be
+                                * returned as the load.
+                                */
+                               load = (int)idle_time < 0 ? 100 : 0;
+                       }
                        j_cdbs->prev_load = load;
                }
 
@@ -254,43 +222,6 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(dbs_update);
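
Taken together, the branches above compute the per-CPU load for a window as 100 * (time_elapsed - idle_time) / time_elapsed, with two special cases: a zero-length window reuses the previous load, and the first long window after an idle stretch reuses the previous load exactly once before clearing it. A standalone model of that arithmetic (plain C, not kernel code; times in microseconds, names illustrative):

#include <assert.h>

struct sample_state {
        unsigned long long prev_update_time;
        unsigned long long prev_cpu_idle;
        unsigned int prev_load;
};

static unsigned int compute_load(struct sample_state *s,
                                 unsigned long long update_time,
                                 unsigned long long cpu_idle,
                                 unsigned int sampling_rate)
{
        unsigned int time_elapsed = update_time - s->prev_update_time;
        unsigned int idle_time = cpu_idle - s->prev_cpu_idle;
        unsigned int load;

        s->prev_update_time = update_time;
        s->prev_cpu_idle = cpu_idle;

        if (!time_elapsed) {
                /* Two back-to-back calls: reuse the previous value. */
                load = s->prev_load;
        } else if (time_elapsed > 2 * sampling_rate && s->prev_load) {
                /* First window after a long idle stretch: reuse once. */
                load = s->prev_load;
                s->prev_load = 0;
        } else {
                if (time_elapsed >= idle_time)
                        load = 100 * (time_elapsed - idle_time) / time_elapsed;
                else
                        load = (int)idle_time < 0 ? 100 : 0;
                s->prev_load = load;
        }
        return load;
}

int main(void)
{
        struct sample_state s = { 0, 0, 0 };
        unsigned int rate = 10000;      /* 10 ms sampling rate */

        /* 10 ms window, 2.5 ms of it idle -> 75% load. */
        assert(compute_load(&s, 10000, 2500, rate) == 75);
        /* Long idle gap (100 ms): previous load reused exactly once. */
        assert(compute_load(&s, 110000, 97500, rate) == 75);
        assert(s.prev_load == 0);
        return 0;
}
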
 
-static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
-                               unsigned int delay_us)
-{
-       struct cpufreq_policy *policy = policy_dbs->policy;
-       int cpu;
-
-       gov_update_sample_delay(policy_dbs, delay_us);
-       policy_dbs->last_sample_time = 0;
-
-       for_each_cpu(cpu, policy->cpus) {
-               struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
-               cpufreq_set_update_util_data(cpu, &cdbs->update_util);
-       }
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-       int i;
-
-       for_each_cpu(i, policy->cpus)
-               cpufreq_set_update_util_data(i, NULL);
-
-       synchronize_sched();
-}
-
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
-       struct policy_dbs_info *policy_dbs = policy->governor_data;
-
-       gov_clear_update_util(policy_dbs->policy);
-       irq_work_sync(&policy_dbs->irq_work);
-       cancel_work_sync(&policy_dbs->work);
-       atomic_set(&policy_dbs->work_count, 0);
-       policy_dbs->work_in_progress = false;
-}
-
 static void dbs_work_handler(struct work_struct *work)
 {
        struct policy_dbs_info *policy_dbs;
@@ -378,6 +309,44 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
        irq_work_queue(&policy_dbs->irq_work);
 }
 
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+                               unsigned int delay_us)
+{
+       struct cpufreq_policy *policy = policy_dbs->policy;
+       int cpu;
+
+       gov_update_sample_delay(policy_dbs, delay_us);
+       policy_dbs->last_sample_time = 0;
+
+       for_each_cpu(cpu, policy->cpus) {
+               struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+               cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+                                            dbs_update_util_handler);
+       }
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+       int i;
+
+       for_each_cpu(i, policy->cpus)
+               cpufreq_remove_update_util_hook(i);
+
+       synchronize_sched();
+}
+
+static void gov_cancel_work(struct cpufreq_policy *policy)
+{
+       struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+       gov_clear_update_util(policy_dbs->policy);
+       irq_work_sync(&policy_dbs->irq_work);
+       cancel_work_sync(&policy_dbs->work);
+       atomic_set(&policy_dbs->work_count, 0);
+       policy_dbs->work_in_progress = false;
+}
+
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
 {
@@ -400,7 +369,6 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
 
                j_cdbs->policy_dbs = policy_dbs;
-               j_cdbs->update_util.func = dbs_update_util_handler;
        }
        return policy_dbs;
 }
@@ -449,10 +417,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;
 
-               mutex_lock(&dbs_data->mutex);
-               dbs_data->usage_count++;
-               list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
-               mutex_unlock(&dbs_data->mutex);
+               gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
                goto out;
        }
 
@@ -462,8 +427,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
                goto free_policy_dbs_info;
        }
 
-       INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
-       mutex_init(&dbs_data->mutex);
+       gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
 
        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
@@ -483,14 +447,11 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
 
-       policy->governor_data = policy_dbs;
-
        policy_dbs->dbs_data = dbs_data;
-       dbs_data->usage_count = 1;
-       list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+       policy->governor_data = policy_dbs;
 
        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
-       ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+       ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
@@ -519,29 +480,21 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
-       int count;
+       unsigned int count;
 
        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);
 
-       mutex_lock(&dbs_data->mutex);
-       list_del(&policy_dbs->list);
-       count = --dbs_data->usage_count;
-       mutex_unlock(&dbs_data->mutex);
+       count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
 
-       if (!count) {
-               kobject_put(&dbs_data->kobj);
-
-               policy->governor_data = NULL;
+       policy->governor_data = NULL;
 
+       if (!count) {
                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;
 
                gov->exit(dbs_data, policy->governor->initialized == 1);
-               mutex_destroy(&dbs_data->mutex);
                kfree(dbs_data);
-       } else {
-               policy->governor_data = NULL;
        }
 
        free_policy_dbs_info(policy_dbs, gov);
@@ -570,12 +523,12 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
-               unsigned int prev_load;
 
-               j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
-
-               prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
-               j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
+               j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
+               /*
+                * Make the first invocation of dbs_update() compute the load.
+                */
+               j_cdbs->prev_load = 0;
 
                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
index 61ff82fe0613296678532621847e5cd42361ad33..34eb214b6d57a4597727f7d21c8791cc38faa276 100644 (file)
 #include <linux/module.h>
 #include <linux/mutex.h>
 
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10ms, using
- * appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in us (micro seconds).
- */
-#define MIN_SAMPLING_RATE_RATIO                        (2)
-#define LATENCY_MULTIPLIER                     (1000)
-#define MIN_LATENCY_MULTIPLIER                 (20)
-#define TRANSITION_LATENCY_LIMIT               (10 * 1000 * 1000)
-
 /* Ondemand Sampling types */
 enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
 
@@ -52,7 +38,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
 
 /* Governor demand based switching data (per-policy or global). */
 struct dbs_data {
-       int usage_count;
+       struct gov_attr_set attr_set;
        void *tuners;
        unsigned int min_sampling_rate;
        unsigned int ignore_nice_load;
@@ -60,37 +46,27 @@ struct dbs_data {
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int io_is_busy;
-
-       struct kobject kobj;
-       struct list_head policy_dbs_list;
-       /*
-        * Protect concurrent updates to governor tunables from sysfs,
-        * policy_dbs_list and usage_count.
-        */
-       struct mutex mutex;
 };
 
-/* Governor's specific attributes */
-struct dbs_data;
-struct governor_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
-       ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
-                        size_t count);
-};
+static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
+{
+       return container_of(attr_set, struct dbs_data, attr_set);
+}
 
 #define gov_show_one(_gov, file_name)                                  \
 static ssize_t show_##file_name                                                \
-(struct dbs_data *dbs_data, char *buf)                                 \
+(struct gov_attr_set *attr_set, char *buf)                             \
 {                                                                      \
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);              \
        struct _gov##_dbs_tuners *tuners = dbs_data->tuners;            \
        return sprintf(buf, "%u\n", tuners->file_name);                 \
 }
 
 #define gov_show_one_common(file_name)                                 \
 static ssize_t show_##file_name                                                \
-(struct dbs_data *dbs_data, char *buf)                                 \
+(struct gov_attr_set *attr_set, char *buf)                             \
 {                                                                      \
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);              \
        return sprintf(buf, "%u\n", dbs_data->file_name);               \
 }
 
@@ -135,7 +111,7 @@ static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
 /* Per cpu structures */
 struct cpu_dbs_info {
        u64 prev_cpu_idle;
-       u64 prev_cpu_wall;
+       u64 prev_update_time;
        u64 prev_cpu_nice;
        /*
         * Used to keep track of load in the previous interval. However, when
@@ -184,7 +160,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias);
 void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count);
 void gov_update_cpu_data(struct dbs_data *dbs_data);
 #endif /* _CPUFREQ_GOVERNOR_H */
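
With struct governor_attr gone from this header, the attribute type presumably moves out alongside governor_sysfs_ops in its gov_attr_set-based form; a sketch of the relocated type (an assumption, not quoted from this patch):

        /* Sketch: the attribute type now keyed to struct gov_attr_set. */
        struct governor_attr {
                struct attribute attr;
                ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
                ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
                                 size_t count);
        };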
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 0000000..52841f8
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+       return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+       return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+                            char *buf)
+{
+       struct governor_attr *gattr = to_gov_attr(attr);
+
+       return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+       struct governor_attr *gattr = to_gov_attr(attr);
+       int ret;
+
+       mutex_lock(&attr_set->update_lock);
+       ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+       mutex_unlock(&attr_set->update_lock);
+       return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+       .show   = governor_show,
+       .store  = governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+       INIT_LIST_HEAD(&attr_set->policy_list);
+       mutex_init(&attr_set->update_lock);
+       attr_set->usage_count = 1;
+       list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+       mutex_lock(&attr_set->update_lock);
+       attr_set->usage_count++;
+       list_add(list_node, &attr_set->policy_list);
+       mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+       unsigned int count;
+
+       mutex_lock(&attr_set->update_lock);
+       list_del(list_node);
+       count = --attr_set->usage_count;
+       mutex_unlock(&attr_set->update_lock);
+       if (count)
+               return count;
+
+       kobject_put(&attr_set->kobj);
+       mutex_destroy(&attr_set->update_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
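
The lifecycle these three helpers implement: the first policy using a tunable set calls gov_attr_set_init(), later policies attach with gov_attr_set_get(), and each detaching policy calls gov_attr_set_put(), which drops the kobject and destroys the lock once the usage count reaches zero. A hedged caller sketch, mirroring how cpufreq_governor_exit() above uses the return value:

        /* Illustrative caller; dbs_data embeds the struct gov_attr_set. */
        static void sketch_attach(struct dbs_data *dbs_data,
                                  struct policy_dbs_info *policy_dbs, bool first)
        {
                if (first)
                        gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
                else
                        gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
        }

        static void sketch_detach(struct dbs_data *dbs_data,
                                  struct policy_dbs_info *policy_dbs)
        {
                if (!gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list))
                        kfree(dbs_data);        /* last reference dropped */
        }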
index acd80272ded67b46ad651cb0eceb92bdf71e26d7..300163430516766d56b842d8d12dc33175eee66b 100644
@@ -207,9 +207,10 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
 /************************** sysfs interface ************************/
 static struct dbs_governor od_dbs_gov;
 
-static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+                               size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;
 
@@ -224,9 +225,10 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
        return count;
 }
 
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+                                 const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
@@ -240,9 +242,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
        return count;
 }
 
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
-               const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+                                         const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int input;
        int ret;
@@ -254,7 +257,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
        dbs_data->sampling_down_factor = input;
 
        /* Reset down sampling multiplier in case it was active */
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+       list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                /*
                 * Doing this without locking might lead to using different
                 * rate_mult values in od_update() and od_dbs_timer().
@@ -267,9 +270,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
        return count;
 }
 
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
-               const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+                                     const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;
 
@@ -291,9 +295,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
        return count;
 }
 
-static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+                                   const char *buf, size_t count)
 {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct policy_dbs_info *policy_dbs;
        unsigned int input;
@@ -308,7 +313,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
 
        od_tuners->powersave_bias = input;
 
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+       list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
                ondemand_powersave_bias_init(policy_dbs->policy);
 
        return count;
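
A sysfs write reaches these handlers via governor_store() in cpufreq_governor_attr_set.c: it resolves the kobject to its gov_attr_set, takes update_lock, and dispatches to the tunable's store callback, which can then walk attr_set->policy_list as above. A hypothetical tunable following the same pattern (field and range are illustrative):

        /* Hypothetical store handler; not part of this patch. */
        static ssize_t store_example_tunable(struct gov_attr_set *attr_set,
                                             const char *buf, size_t count)
        {
                struct dbs_data *dbs_data = to_dbs_data(attr_set);
                unsigned int input;

                if (sscanf(buf, "%u", &input) != 1 || input > 100)
                        return -EINVAL;

                dbs_data->up_threshold = input;
                return count;
        }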
index 4d16f45ee1daf3e64e23c97a9cec8b7a4ea0fc0f..9f3dec9a3f36db98407c743a0fc6493476b14bcf 100644
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 
 static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
 static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
 static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 {
        int ret = -EINVAL;
+       unsigned int *setspeed = policy->governor_data;
 
        pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
        if (!per_cpu(cpu_is_managed, policy->cpu))
                goto err;
 
+       *setspeed = freq;
+
        ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
  err:
        mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
        return sprintf(buf, "%u\n", policy->cur);
 }
 
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+       unsigned int *setspeed;
+
+       setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+       if (!setspeed)
+               return -ENOMEM;
+
+       policy->governor_data = setspeed;
+       return 0;
+}
+
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                                   unsigned int event)
 {
+       unsigned int *setspeed = policy->governor_data;
        unsigned int cpu = policy->cpu;
        int rc = 0;
 
+       if (event == CPUFREQ_GOV_POLICY_INIT)
+               return cpufreq_userspace_policy_init(policy);
+
+       if (!setspeed)
+               return -EINVAL;
+
        switch (event) {
+       case CPUFREQ_GOV_POLICY_EXIT:
+               mutex_lock(&userspace_mutex);
+               policy->governor_data = NULL;
+               kfree(setspeed);
+               mutex_unlock(&userspace_mutex);
+               break;
        case CPUFREQ_GOV_START:
                BUG_ON(!policy->cur);
                pr_debug("started managing cpu %u\n", cpu);
 
                mutex_lock(&userspace_mutex);
                per_cpu(cpu_is_managed, cpu) = 1;
+               *setspeed = policy->cur;
                mutex_unlock(&userspace_mutex);
                break;
        case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 
                mutex_lock(&userspace_mutex);
                per_cpu(cpu_is_managed, cpu) = 0;
+               *setspeed = 0;
                mutex_unlock(&userspace_mutex);
                break;
        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&userspace_mutex);
-               pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
-                       cpu, policy->min, policy->max,
-                       policy->cur);
+               pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+                       cpu, policy->min, policy->max, policy->cur, *setspeed);
 
-               if (policy->max < policy->cur)
+               if (policy->max < *setspeed)
                        __cpufreq_driver_target(policy, policy->max,
                                                CPUFREQ_RELATION_H);
-               else if (policy->min > policy->cur)
+               else if (policy->min > *setspeed)
                        __cpufreq_driver_target(policy, policy->min,
                                                CPUFREQ_RELATION_L);
+               else
+                       __cpufreq_driver_target(policy, *setspeed,
+                                               CPUFREQ_RELATION_L);
                mutex_unlock(&userspace_mutex);
                break;
        }
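
The net effect of the *setspeed bookkeeping: a limits event now re-targets the remembered user speed clamped into [policy->min, policy->max], instead of only nudging policy->cur when it falls outside the range. The selection logic, as a standalone sketch:

        /* Sketch: frequency requested after a CPUFREQ_GOV_LIMITS event. */
        static unsigned int sketch_resolve(unsigned int setspeed,
                                           unsigned int min, unsigned int max)
        {
                if (setspeed > max)
                        return max;
                if (setspeed < min)
                        return min;
                return setspeed;        /* the user's choice is still valid */
        }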
index 4085244c8a67075b7d4d14fda6e92d3c9d5280e3..cdf097b298623d6c0878ddf58a820578cbd41537 100644
@@ -6,6 +6,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -20,7 +22,7 @@
 #include <asm/msr.h>
 #include <asm/tsc.h>
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #endif
@@ -33,7 +35,7 @@
 
 struct eps_cpu_data {
        u32 fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
        u32 bios_limit;
 #endif
        struct cpufreq_frequency_table freq_table[];
@@ -46,7 +48,7 @@ static int freq_failsafe_off;
 static int voltage_failsafe_off;
 static int set_max_voltage;
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 static int ignore_acpi_limit;
 
 static struct acpi_processor_performance *eps_acpi_cpu_perf;
@@ -141,11 +143,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
        /* Print voltage and multiplier */
        rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
        current_voltage = lo & 0xff;
-       printk(KERN_INFO "eps: Current voltage = %dmV\n",
-               current_voltage * 16 + 700);
+       pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
        current_multiplier = (lo >> 8) & 0xff;
-       printk(KERN_INFO "eps: Current multiplier = %d\n",
-               current_multiplier);
+       pr_info("Current multiplier = %d\n", current_multiplier);
        }
 #endif
        return 0;
@@ -166,7 +166,7 @@ static int eps_target(struct cpufreq_policy *policy, unsigned int index)
        dest_state = centaur->freq_table[index].driver_data & 0xffff;
        ret = eps_set_state(centaur, policy, dest_state);
        if (ret)
-               printk(KERN_ERR "eps: Timeout!\n");
+               pr_err("Timeout!\n");
        return ret;
 }
 
@@ -186,7 +186,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
        int k, step, voltage;
        int ret;
        int states;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
        unsigned int limit;
 #endif
 
@@ -194,36 +194,36 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
                return -ENODEV;
 
        /* Check brand */
-       printk(KERN_INFO "eps: Detected VIA ");
+       pr_info("Detected VIA ");
 
        switch (c->x86_model) {
        case 10:
                rdmsr(0x1153, lo, hi);
                brand = (((lo >> 2) ^ lo) >> 18) & 3;
-               printk(KERN_CONT "Model A ");
+               pr_cont("Model A ");
                break;
        case 13:
                rdmsr(0x1154, lo, hi);
                brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
-               printk(KERN_CONT "Model D ");
+               pr_cont("Model D ");
                break;
        }
 
        switch (brand) {
        case EPS_BRAND_C7M:
-               printk(KERN_CONT "C7-M\n");
+               pr_cont("C7-M\n");
                break;
        case EPS_BRAND_C7:
-               printk(KERN_CONT "C7\n");
+               pr_cont("C7\n");
                break;
        case EPS_BRAND_EDEN:
-               printk(KERN_CONT "Eden\n");
+               pr_cont("Eden\n");
                break;
        case EPS_BRAND_C7D:
-               printk(KERN_CONT "C7-D\n");
+               pr_cont("C7-D\n");
                break;
        case EPS_BRAND_C3:
-               printk(KERN_CONT "C3\n");
+               pr_cont("C3\n");
                return -ENODEV;
                break;
        }
@@ -235,7 +235,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
                /* Can be locked at 0 */
                rdmsrl(MSR_IA32_MISC_ENABLE, val);
                if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
-                       printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+                       pr_info("Can't enable Enhanced PowerSaver\n");
                        return -ENODEV;
                }
        }
@@ -243,22 +243,19 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
        /* Print voltage and multiplier */
        rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
        current_voltage = lo & 0xff;
-       printk(KERN_INFO "eps: Current voltage = %dmV\n",
-                       current_voltage * 16 + 700);
+       pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
        current_multiplier = (lo >> 8) & 0xff;
-       printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+       pr_info("Current multiplier = %d\n", current_multiplier);
 
        /* Print limits */
        max_voltage = hi & 0xff;
-       printk(KERN_INFO "eps: Highest voltage = %dmV\n",
-                       max_voltage * 16 + 700);
+       pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
        max_multiplier = (hi >> 8) & 0xff;
-       printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+       pr_info("Highest multiplier = %d\n", max_multiplier);
        min_voltage = (hi >> 16) & 0xff;
-       printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
-                       min_voltage * 16 + 700);
+       pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
        min_multiplier = (hi >> 24) & 0xff;
-       printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+       pr_info("Lowest multiplier = %d\n", min_multiplier);
 
        /* Sanity checks */
        if (current_multiplier == 0 || max_multiplier == 0
@@ -276,34 +273,30 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 
        /* Check for systems using underclocked CPU */
        if (!freq_failsafe_off && max_multiplier != current_multiplier) {
-               printk(KERN_INFO "eps: Your processor is running at different "
-                       "frequency then its maximum. Aborting.\n");
-               printk(KERN_INFO "eps: You can use freq_failsafe_off option "
-                       "to disable this check.\n");
+               pr_info("Your processor is running at different frequency then its maximum. Aborting.\n");
+               pr_info("You can use freq_failsafe_off option to disable this check.\n");
                return -EINVAL;
        }
        if (!voltage_failsafe_off && max_voltage != current_voltage) {
-               printk(KERN_INFO "eps: Your processor is running at different "
-                       "voltage then its maximum. Aborting.\n");
-               printk(KERN_INFO "eps: You can use voltage_failsafe_off "
-                       "option to disable this check.\n");
+               pr_info("Your processor is running at different voltage then its maximum. Aborting.\n");
+               pr_info("You can use voltage_failsafe_off option to disable this check.\n");
                return -EINVAL;
        }
 
        /* Calc FSB speed */
        fsb = cpu_khz / current_multiplier;
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
        /* Check for ACPI processor speed limit */
        if (!ignore_acpi_limit && !eps_acpi_init()) {
                if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
-                       printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+                       pr_info("ACPI limit %u.%uGHz\n",
                                limit/1000000,
                                (limit%1000000)/10000);
                        eps_acpi_exit(policy);
                        /* Check if max_multiplier is in BIOS limits */
                        if (limit && max_multiplier * fsb > limit) {
-                               printk(KERN_INFO "eps: Aborting.\n");
+                               pr_info("Aborting\n");
                                return -EINVAL;
                        }
                }
@@ -319,8 +312,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
                v = (set_max_voltage - 700) / 16;
                /* Check if voltage is within limits */
                if (v >= min_voltage && v <= max_voltage) {
-                       printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
-                               v * 16 + 700);
+                       pr_info("Setting %dmV as maximum\n", v * 16 + 700);
                        max_voltage = v;
                }
        }
@@ -341,7 +333,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 
        /* Copy basic values */
        centaur->fsb = fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
        centaur->bios_limit = limit;
 #endif
 
@@ -426,7 +418,7 @@ module_param(freq_failsafe_off, int, 0644);
 MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
 module_param(voltage_failsafe_off, int, 0644);
 MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 module_param(ignore_acpi_limit, int, 0644);
 MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
 #endif
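
IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in or built as a module, so it subsumes the open-coded `defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE` pair replaced throughout this file. In effect (a simplified illustration, not the kernel's actual macro text in include/linux/kconfig.h):

        /* Simplified illustration of IS_ENABLED() semantics for this option. */
        #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
        #define ACPI_PROCESSOR_IS_ENABLED 1     /* =y or =m */
        #else
        #define ACPI_PROCESSOR_IS_ENABLED 0
        #endif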
index 1c06e786c9baa53f587579ab932701e0a51d5a3f..bfce11cba1df8d92068b4095e1772ce38b7db12e 100644
@@ -16,6 +16,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -185,7 +187,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 static int __init elanfreq_setup(char *str)
 {
        max_freq = simple_strtoul(str, &str, 0);
-       printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+       pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
        return 1;
 }
 __setup("elanfreq=", elanfreq_setup);
diff --git a/drivers/cpufreq/hisi-acpu-cpufreq.c b/drivers/cpufreq/hisi-acpu-cpufreq.c
deleted file mode 100644
index 026d5b2..0000000
--- a/drivers/cpufreq/hisi-acpu-cpufreq.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Hisilicon Platforms Using ACPU CPUFreq Support
- *
- * Copyright (c) 2015 Hisilicon Limited.
- * Copyright (c) 2015 Linaro Limited.
- *
- * Leo Yan <leo.yan@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-static int __init hisi_acpu_cpufreq_driver_init(void)
-{
-       struct platform_device *pdev;
-
-       if (!of_machine_is_compatible("hisilicon,hi6220"))
-               return -ENODEV;
-
-       pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-       return PTR_ERR_OR_ZERO(pdev);
-}
-module_init(hisi_acpu_cpufreq_driver_init);
-
-MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
-MODULE_LICENSE("GPL v2");
index 0202429f1c5bbe01f0ec43dda5d5ad7c5a092d1a..759612da4fdcee8fa61a938d6d0c6ddfa5c0dfb1 100644
@@ -8,6 +8,8 @@
  *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -118,8 +120,7 @@ processor_get_freq (
 
        if (ret) {
                set_cpus_allowed_ptr(current, &saved_mask);
-               printk(KERN_WARNING "get performance failed with error %d\n",
-                      ret);
+               pr_warn("get performance failed with error %d\n", ret);
                ret = 0;
                goto migrate_end;
        }
@@ -177,7 +178,7 @@ processor_set_freq (
 
        ret = processor_set_pstate(value);
        if (ret) {
-               printk(KERN_WARNING "Transition failed with error %d\n", ret);
+               pr_warn("Transition failed with error %d\n", ret);
                retval = -ENODEV;
                goto migrate_end;
        }
@@ -291,8 +292,7 @@ acpi_cpufreq_cpu_init (
        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);
 
-       printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
-              "activated.\n", cpu);
+       pr_info("CPU%u - ACPI performance management activated\n", cpu);
 
        for (i = 0; i < data->acpi_data.state_count; i++)
                pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
index f502d5b90c2537b88a86b5224f21d175556be744..b76a98dd9988b4f553ddbdd47b663f8720e5fc89 100644
@@ -10,6 +10,8 @@
  * of the License.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #define ATOM_TURBO_RATIOS      0x66c
 #define ATOM_TURBO_VIDS                0x66d
 
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
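
This hunk and the next introduce an extended fixed-point format: with FRAC_BITS = 8 and EXT_BITS = 6, mul_ext_fp() and div_ext_fp() (defined in the next hunk) operate on u64 values carrying 14 fractional bits, which preserves more precision in the APERF/MPERF ratio than the old percent-based math. A hedged worked example:

        /*
         * Illustrative use of the helpers below: keep the average perf
         * ratio aperf/mperf in 14-fractional-bit form, then scale it to an
         * integer percentage, as the tracing code in this patch does.
         */
        static u64 example_avg_perf_pct(u64 aperf, u64 mperf)
        {
                u64 core_avg_perf = div_ext_fp(aperf, mperf);

                return mul_ext_fp(100, core_avg_perf); /* e.g. 50 at half speed */
        }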
@@ -64,12 +73,22 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+       return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+       return div64_u64(x << EXT_FRAC_BITS, y);
+}
+
 /**
  * struct sample -     Store performance sample
- * @core_pct_busy:     Ratio of APERF/MPERF in percent, which is actual
+ * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
  *                     performance during last sample period
  * @busy_scaled:       Scaled busy value which is used to calculate next
- *                     P state. This can be different than core_pct_busy
+ *                     P state. This can be different than core_avg_perf
  *                     to account for cpu idle period
  * @aperf:             Difference of actual performance frequency clock count
  *                     read from APERF MSR between last and current sample
@@ -84,7 +103,7 @@ static inline int ceiling_fp(int32_t x)
  * data for choosing next P State.
  */
 struct sample {
-       int32_t core_pct_busy;
+       int32_t core_avg_perf;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
@@ -162,6 +181,7 @@ struct _pid {
  * struct cpudata -    Per CPU instance data storage
  * @cpu:               CPU number for this instance data
  * @update_util:       CPUFreq utility callback information
+ * @update_util_set:   CPUFreq utility callback is set
  * @pstate:            Stores P state limits for this CPU
  * @vid:               Stores VID limits for this CPU
  * @pid:               Stores PID parameters for this CPU
@@ -172,6 +192,8 @@ struct _pid {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *                     current sample
  * @sample:            Storage for storing last Sample data
+ * @acpi_perf_data:    Stores ACPI perf information read from _PSS
+ * @valid_pss_table:   Set to true for valid ACPI _PSS entries found
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -179,6 +201,7 @@ struct cpudata {
        int cpu;
 
        struct update_util_data update_util;
+       bool   update_util_set;
 
        struct pstate_data pstate;
        struct vid_data vid;
@@ -190,6 +213,10 @@ struct cpudata {
        u64     prev_tsc;
        u64     prev_cummulative_iowait;
        struct sample sample;
+#ifdef CONFIG_ACPI
+       struct acpi_processor_performance acpi_perf_data;
+       bool valid_pss_table;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -258,6 +285,9 @@ static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
 
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
 
 /**
  * struct perf_limits - Store user and policy limits
@@ -331,6 +361,124 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+       if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+           acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+               return true;
+
+       return acpi_ppc;
+}
+
+/*
+ * The max target pstate ratio is an 8-bit value in both the PLATFORM_INFO
+ * MSR and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
+ * max_pstate and max_turbo_pstate fields. The PERF_CTL MSR contains a 16-bit
+ * value for the P-state ratio, of which only the high 8 bits are used; for
+ * example, writing 0x1700 sets target ratio 0x17. The _PSS control value is
+ * stored in a format that can be written directly to the PERF_CTL MSR, but
+ * in the intel_pstate driver this shift occurs during the write to PERF_CTL
+ * (e.g. in core_set_pstate() for Core processors). This function converts
+ * the _PSS control value to the intel_pstate driver format for comparison
+ * and assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+       return cpu->acpi_perf_data.states[index].control >> 8;
+}
+
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+       struct cpudata *cpu;
+       int turbo_pss_ctl;
+       int ret;
+       int i;
+
+       if (hwp_active)
+               return;
+
+       if (!intel_pstate_get_ppc_enable_status())
+               return;
+
+       cpu = all_cpu_data[policy->cpu];
+
+       ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+                                                 policy->cpu);
+       if (ret)
+               return;
+
+       /*
+        * Check if the control value in _PSS is for PERF_CTL MSR, which should
+        * guarantee that the states returned by it map to the states in our
+        * list directly.
+        */
+       if (cpu->acpi_perf_data.control_register.space_id !=
+                                               ACPI_ADR_SPACE_FIXED_HARDWARE)
+               goto err;
+
+       /*
+        * If there is only one entry in _PSS, simply ignore _PSS and
+        * continue as usual without taking it into account.
+        */
+       if (cpu->acpi_perf_data.state_count < 2)
+               goto err;
+
+       pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+       for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+               pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+                        (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+                        (u32) cpu->acpi_perf_data.states[i].core_frequency,
+                        (u32) cpu->acpi_perf_data.states[i].power,
+                        (u32) cpu->acpi_perf_data.states[i].control);
+       }
+
+       /*
+        * The _PSS table doesn't contain the whole turbo frequency range.
+        * It just contains +1 MHz above the max non-turbo frequency, with
+        * the control value corresponding to the max turbo ratio. But when
+        * cpufreq set_policy is called, it is called with this max
+        * frequency, which causes reduced performance, since this driver
+        * uses the real max turbo frequency as the max frequency. So
+        * correct this frequency in the _PSS table to the real max turbo
+        * frequency based on the turbo ratio. Also convert to MHz, since
+        * the _PSS frequencies are in MHz.
+        */
+       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+       if (turbo_pss_ctl > cpu->pstate.max_pstate)
+               cpu->acpi_perf_data.states[0].core_frequency =
+                                       policy->cpuinfo.max_freq / 1000;
+       cpu->valid_pss_table = true;
+       pr_info("_PPC limits will be enforced\n");
+
+       return;
+
+ err:
+       cpu->valid_pss_table = false;
+       acpi_processor_unregister_performance(policy->cpu);
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+       struct cpudata *cpu;
+
+       cpu = all_cpu_data[policy->cpu];
+       if (!cpu->valid_pss_table)
+               return;
+
+       acpi_processor_unregister_performance(policy->cpu);
+}
+
+#else
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+#endif
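
A worked instance of the conversion described above (value illustrative): a _PSS control value of 0x1700 encodes target ratio 0x17, so convert_to_native_pstate_format() yields 0x1700 >> 8 == 0x17 (decimal 23) for comparison against max_pstate.

        /* Worked example with an illustrative control value. */
        static int example_pss_control_to_pstate(void)
        {
                int control = 0x1700;   /* as read from _PSS */

                return control >> 8;    /* 0x17 == 23, intel_pstate ratio format */
        }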
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral) {
        pid->setpoint = int_tofp(setpoint);
@@ -341,17 +489,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
 {
-       pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->p_gain = div_fp(percent, 100);
 }
 
 static inline void pid_i_gain_set(struct _pid *pid, int percent)
 {
-       pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->i_gain = div_fp(percent, 100);
 }
 
 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {
-       pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->d_gain = div_fp(percent, 100);
 }
 
 static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -453,6 +601,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
        }
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+       if (hwp_active)
+               intel_pstate_hwp_set(policy->cpus);
+
+       return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
        get_online_cpus();
@@ -529,7 +685,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
-       turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+       turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
 }
@@ -571,7 +727,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        update_turbo_state();
        if (limits->turbo_disabled) {
-               pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
 
@@ -600,8 +756,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
 
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@ -625,8 +780,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
 
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@ -1011,15 +1165,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        intel_pstate_set_min_pstate(cpu);
 }
 
-static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
 {
        struct sample *sample = &cpu->sample;
-       int64_t core_pct;
-
-       core_pct = int_tofp(sample->aperf) * int_tofp(100);
-       core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
 
-       sample->core_pct_busy = (int32_t)core_pct;
+       sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
 }
 
 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
@@ -1062,8 +1212,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-       return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-               cpu->pstate.scaling, cpu->sample.mperf);
+       return mul_ext_fp(cpu->sample.core_avg_perf,
+                         cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+       return mul_ext_fp(cpu->pstate.max_pstate_physical,
+                         cpu->sample.core_avg_perf);
 }
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1098,51 +1254,43 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
        cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
        cpu->sample.busy_scaled = cpu_load;
 
-       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
+       return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
-       int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+       int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;
 
-       intel_pstate_calc_busy(cpu);
-
        /*
-        * core_busy is the ratio of actual performance to max
-        * max_pstate is the max non turbo pstate available
-        * current_pstate was the pstate that was requested during
-        *      the last sample period.
-        *
-        * We normalize core_busy, which was our actual percent
-        * performance to what we requested during the last sample
-        * period. The result will be a percentage of busy at a
-        * specified pstate.
+        * perf_scaled is the average performance during the last sampling
+        * period scaled by the ratio of the maximum P-state to the P-state
+        * requested last time (in percent).  That measures the system's
+        * response to the previous P-state selection.
         */
-       core_busy = cpu->sample.core_pct_busy;
-       max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
-       current_pstate = int_tofp(cpu->pstate.current_pstate);
-       core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+       max_pstate = cpu->pstate.max_pstate_physical;
+       current_pstate = cpu->pstate.current_pstate;
+       perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
+                              div_fp(100 * max_pstate, current_pstate));
 
        /*
         * Since our utilization update callback will not run unless we are
         * in C0, check if the actual elapsed time is significantly greater (3x)
         * than our sample interval.  If it is, then we were idle for a long
-        * enough period of time to adjust our busyness.
+        * enough period of time to adjust our performance metric.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
-               sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
-                                     int_tofp(duration_ns));
-               core_busy = mul_fp(core_busy, sample_ratio);
+               sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
+               perf_scaled = mul_fp(perf_scaled, sample_ratio);
        } else {
                sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
                if (sample_ratio < int_tofp(1))
-                       core_busy = 0;
+                       perf_scaled = 0;
        }
 
-       cpu->sample.busy_scaled = core_busy;
-       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+       cpu->sample.busy_scaled = perf_scaled;
+       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
 
 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1172,7 +1320,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
        intel_pstate_update_pstate(cpu, target_pstate);
 
        sample = &cpu->sample;
-       trace_pstate_sample(fp_toint(sample->core_pct_busy),
+       trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
                fp_toint(sample->busy_scaled),
                from,
                cpu->pstate.current_pstate,
@@ -1191,8 +1339,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
 
-               if (sample_taken && !hwp_active)
-                       intel_pstate_adjust_busy_pstate(cpu);
+               if (sample_taken) {
+                       intel_pstate_calc_avg_perf(cpu);
+                       if (!hwp_active)
+                               intel_pstate_adjust_busy_pstate(cpu);
+               }
        }
 }
 
@@ -1251,23 +1402,16 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
        intel_pstate_busy_pid_reset(cpu);
 
-       cpu->update_util.func = intel_pstate_update_util;
-
-       pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+       pr_debug("controlling: cpu %d\n", cpunum);
 
        return 0;
 }
 
 static unsigned int intel_pstate_get(unsigned int cpu_num)
 {
-       struct sample *sample;
-       struct cpudata *cpu;
+       struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       cpu = all_cpu_data[cpu_num];
-       if (!cpu)
-               return 0;
-       sample = &cpu->sample;
-       return get_avg_frequency(cpu);
+       return cpu ? get_avg_frequency(cpu) : 0;
 }
 
 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
@@ -1276,12 +1420,20 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 
        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
-       cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+       cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+                                    intel_pstate_update_util);
+       cpu->update_util_set = true;
 }
 
 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 {
-       cpufreq_set_update_util_data(cpu, NULL);
+       struct cpudata *cpu_data = all_cpu_data[cpu];
+
+       if (!cpu_data->update_util_set)
+               return;
+
+       cpufreq_remove_update_util_hook(cpu);
+       cpu_data->update_util_set = false;
        synchronize_sched();
 }
 
@@ -1301,20 +1453,31 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+       struct cpudata *cpu;
+
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
        intel_pstate_clear_update_util_hook(policy->cpu);
 
+       cpu = all_cpu_data[0];
+       if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
+               if (policy->max < policy->cpuinfo.max_freq &&
+                   policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+                       pr_debug("policy->max > max non turbo frequency\n");
+                       policy->max = policy->cpuinfo.max_freq;
+               }
+       }
+
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
-                       pr_debug("intel_pstate: set performance\n");
+                       pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
-               pr_debug("intel_pstate: set powersave\n");
+               pr_debug("set powersave\n");
                limits = &powersave_limits;
        }
 
@@ -1338,16 +1501,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
 
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
 
-       if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+       intel_pstate_hwp_set_policy(policy);
 
        return 0;
 }
@@ -1368,7 +1528,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+       pr_debug("CPU %d exiting\n", cpu_num);
 
        intel_pstate_clear_update_util_hook(cpu_num);
 
@@ -1401,18 +1561,28 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
 
        return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+       intel_pstate_exit_perf_limits(policy);
+
+       return 0;
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
+       .resume         = intel_pstate_hwp_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
+       .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
@@ -1456,8 +1626,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
+#ifdef CONFIG_ACPI
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1613,7 +1782,7 @@ hwp_cpu_matched:
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
 
-       pr_info("Intel P-state driver initializing.\n");
+       pr_info("Intel P-state driver initializing\n");
 
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
@@ -1630,7 +1799,7 @@ hwp_cpu_matched:
        intel_pstate_sysfs_expose_params();
 
        if (hwp_active)
-               pr_info("intel_pstate: HWP enabled\n");
+               pr_info("HWP enabled\n");
 
        return rc;
 out:
@@ -1656,13 +1825,19 @@ static int __init intel_pstate_setup(char *str)
        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
-               pr_info("intel_pstate: HWP disabled\n");
+               pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
+
+#ifdef CONFIG_ACPI
+       if (!strcmp(str, "support_acpi_ppc"))
+               acpi_ppc = true;
+#endif
+
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
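
Since intel_pstate_setup() is registered with early_param(), these flags go on the kernel command line; with this patch the recognized values are disable, no_hwp, force, hwp_only and, when built with CONFIG_ACPI, the new support_acpi_ppc, e.g.:

        intel_pstate=support_acpi_ppc

On server platforms (FADT preferred profile PM_ENTERPRISE_SERVER or PM_PERFORMANCE_SERVER), intel_pstate_get_ppc_enable_status() enforces _PPC limits regardless of this option.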
index 0f6b229afcb9e621eb116f6ce805a619ddfe96b3..beae5cf5c62c114af3c188dd21d5356e2b869141 100644
@@ -21,6 +21,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -40,8 +42,6 @@
 
 #include "longhaul.h"
 
-#define PFX "longhaul: "
-
 #define TYPE_LONGHAUL_V1       1
 #define TYPE_LONGHAUL_V2       2
 #define TYPE_POWERSAVER                3
@@ -347,14 +347,13 @@ retry_loop:
        freqs.new = calc_speed(longhaul_get_cpu_mult());
        /* Check if requested frequency is set. */
        if (unlikely(freqs.new != speed)) {
-               printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+               pr_info("Failed to set requested frequency!\n");
                /* Revision ID = 1 but processor is expecting revision key
                 * equal to 0. Jumpers at the bottom of processor will change
                 * multiplier and FSB, but will not change bits in Longhaul
                 * MSR nor enable voltage scaling. */
                if (!revid_errata) {
-                       printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
-                                               "option.\n");
+                       pr_info("Enabling \"Ignore Revision ID\" option\n");
                        revid_errata = 1;
                        msleep(200);
                        goto retry_loop;
@@ -364,11 +363,10 @@ retry_loop:
                 * but it doesn't change frequency. I tried poking various
                 * bits in northbridge registers, but without success. */
                if (longhaul_flags & USE_ACPI_C3) {
-                       printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+                       pr_info("Disabling ACPI C3 support\n");
                        longhaul_flags &= ~USE_ACPI_C3;
                        if (revid_errata) {
-                               printk(KERN_INFO PFX "Disabling \"Ignore "
-                                               "Revision ID\" option.\n");
+                               pr_info("Disabling \"Ignore Revision ID\" option\n");
                                revid_errata = 0;
                        }
                        msleep(200);
@@ -379,7 +377,7 @@ retry_loop:
                 * RevID = 1. RevID errata will make things right. Just
                 * to be 100% sure. */
                if (longhaul_version == TYPE_LONGHAUL_V2) {
-                       printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+                       pr_info("Switching to Longhaul ver. 1\n");
                        longhaul_version = TYPE_LONGHAUL_V1;
                        msleep(200);
                        goto retry_loop;
@@ -387,8 +385,7 @@ retry_loop:
        }
 
        if (!bm_timeout) {
-               printk(KERN_INFO PFX "Warning: Timeout while waiting for "
-                               "idle PCI bus.\n");
+               pr_info("Warning: Timeout while waiting for idle PCI bus\n");
                return -EBUSY;
        }
 
@@ -433,12 +430,12 @@ static int longhaul_get_ranges(void)
        /* Get current frequency */
        mult = longhaul_get_cpu_mult();
        if (mult == -1) {
-               printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+               pr_info("Invalid (reserved) multiplier!\n");
                return -EINVAL;
        }
        fsb = guess_fsb(mult);
        if (fsb == 0) {
-               printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+               pr_info("Invalid (reserved) FSB!\n");
                return -EINVAL;
        }
        /* Get max multiplier - as we always did.
@@ -468,11 +465,11 @@ static int longhaul_get_ranges(void)
                 print_speed(highest_speed/1000));
 
        if (lowest_speed == highest_speed) {
-               printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
+               pr_info("highestspeed == lowest, aborting\n");
                return -EINVAL;
        }
        if (lowest_speed > highest_speed) {
-               printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
+               pr_info("nonsense! lowest (%d > %d) !\n",
                        lowest_speed, highest_speed);
                return -EINVAL;
        }
@@ -538,16 +535,16 @@ static void longhaul_setup_voltagescaling(void)
 
        rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
        if (!(longhaul.bits.RevisionID & 1)) {
-               printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
+               pr_info("Voltage scaling not supported by CPU\n");
                return;
        }
 
        if (!longhaul.bits.VRMRev) {
-               printk(KERN_INFO PFX "VRM 8.5\n");
+               pr_info("VRM 8.5\n");
                vrm_mV_table = &vrm85_mV[0];
                mV_vrm_table = &mV_vrm85[0];
        } else {
-               printk(KERN_INFO PFX "Mobile VRM\n");
+               pr_info("Mobile VRM\n");
                if (cpu_model < CPU_NEHEMIAH)
                        return;
                vrm_mV_table = &mobilevrm_mV[0];
@@ -558,27 +555,21 @@ static void longhaul_setup_voltagescaling(void)
        maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
 
        if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
-               printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
-                                       "Voltage scaling disabled.\n",
-                                       minvid.mV/1000, minvid.mV%1000,
-                                       maxvid.mV/1000, maxvid.mV%1000);
+               pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
+                       minvid.mV/1000, minvid.mV%1000,
+                       maxvid.mV/1000, maxvid.mV%1000);
                return;
        }
 
        if (minvid.mV == maxvid.mV) {
-               printk(KERN_INFO PFX "Claims to support voltage scaling but "
-                               "min & max are both %d.%03d. "
-                               "Voltage scaling disabled\n",
-                               maxvid.mV/1000, maxvid.mV%1000);
+               pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
+                       maxvid.mV/1000, maxvid.mV%1000);
                return;
        }
 
        /* How many voltage steps*/
        numvscales = maxvid.pos - minvid.pos + 1;
-       printk(KERN_INFO PFX
-               "Max VID=%d.%03d  "
-               "Min VID=%d.%03d, "
-               "%d possible voltage scales\n",
+       pr_info("Max VID=%d.%03d  Min VID=%d.%03d, %d possible voltage scales\n",
                maxvid.mV/1000, maxvid.mV%1000,
                minvid.mV/1000, minvid.mV%1000,
                numvscales);
@@ -617,12 +608,12 @@ static void longhaul_setup_voltagescaling(void)
                        pos = minvid.pos;
                freq_pos->driver_data |= mV_vrm_table[pos] << 8;
                vid = vrm_mV_table[mV_vrm_table[pos]];
-               printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
+               pr_info("f: %d kHz, index: %d, vid: %d mV\n",
                        speed, (int)(freq_pos - longhaul_table), vid.mV);
        }
 
        can_scale_voltage = 1;
-       printk(KERN_INFO PFX "Voltage scaling enabled.\n");
+       pr_info("Voltage scaling enabled\n");
 }
 
 
@@ -720,8 +711,7 @@ static int enable_arbiter_disable(void)
                        pci_write_config_byte(dev, reg, pci_cmd);
                        pci_read_config_byte(dev, reg, &pci_cmd);
                        if (!(pci_cmd & 1<<7)) {
-                               printk(KERN_ERR PFX
-                                       "Can't enable access to port 0x22.\n");
+                               pr_err("Can't enable access to port 0x22\n");
                                status = 0;
                        }
                }
@@ -758,8 +748,7 @@ static int longhaul_setup_southbridge(void)
                if (pci_cmd & 1 << 7) {
                        pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
                        acpi_regs_addr &= 0xff00;
-                       printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
-                                       acpi_regs_addr);
+                       pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
                }
 
                pci_dev_put(dev);
@@ -853,14 +842,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                        longhaul_version = TYPE_LONGHAUL_V1;
        }
 
-       printk(KERN_INFO PFX "VIA %s CPU detected.  ", cpuname);
+       pr_info("VIA %s CPU detected.  ", cpuname);
        switch (longhaul_version) {
        case TYPE_LONGHAUL_V1:
        case TYPE_LONGHAUL_V2:
-               printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
+               pr_cont("Longhaul v%d supported\n", longhaul_version);
                break;
        case TYPE_POWERSAVER:
-               printk(KERN_CONT "Powersaver supported.\n");
+               pr_cont("Powersaver supported\n");
                break;
        };
 
@@ -889,15 +878,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
        if (!(longhaul_flags & USE_ACPI_C3
             || longhaul_flags & USE_NORTHBRIDGE)
            && ((pr == NULL) || !(pr->flags.bm_control))) {
-               printk(KERN_ERR PFX
-                       "No ACPI support. Unsupported northbridge.\n");
+               pr_err("No ACPI support: Unsupported northbridge\n");
                return -ENODEV;
        }
 
        if (longhaul_flags & USE_NORTHBRIDGE)
-               printk(KERN_INFO PFX "Using northbridge support.\n");
+               pr_info("Using northbridge support\n");
        if (longhaul_flags & USE_ACPI_C3)
-               printk(KERN_INFO PFX "Using ACPI support.\n");
+               pr_info("Using ACPI support\n");
 
        ret = longhaul_get_ranges();
        if (ret != 0)
@@ -934,20 +922,18 @@ static int __init longhaul_init(void)
                return -ENODEV;
 
        if (!enable) {
-               printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+               pr_err("Option \"enable\" not set - Aborting\n");
                return -ENODEV;
        }
 #ifdef CONFIG_SMP
        if (num_online_cpus() > 1) {
-               printk(KERN_ERR PFX "More than 1 CPU detected, "
-                               "longhaul disabled.\n");
+               pr_err("More than 1 CPU detected, longhaul disabled\n");
                return -ENODEV;
        }
 #endif
 #ifdef CONFIG_X86_IO_APIC
        if (cpu_has_apic) {
-               printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
-                               "broken in this configuration.\n");
+               pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
                return -ENODEV;
        }
 #endif
@@ -955,7 +941,7 @@ static int __init longhaul_init(void)
        case 6 ... 9:
                return cpufreq_register_driver(&longhaul_driver);
        case 10:
-               printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+               pr_err("Use acpi-cpufreq driver for VIA C7\n");
        default:
                ;
        }
index cd593c1f66dc8af8a6208933003783e0f37b7392..6bbdac1065ff544dcf7ccc1b5b67971513718288 100644
@@ -10,6 +10,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/err.h>
@@ -76,7 +79,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cpuclk = clk_get(NULL, "cpu_clk");
        if (IS_ERR(cpuclk)) {
-               printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+               pr_err("couldn't get CPU clk\n");
                return PTR_ERR(cpuclk);
        }
 
@@ -163,7 +166,7 @@ static int __init cpufreq_init(void)
        if (ret)
                return ret;
 
-       pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+       pr_info("Loongson-2F CPU frequency driver\n");
 
        cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
                                  CPUFREQ_TRANSITION_NOTIFIER);
index cc3408fc073f982110b446684cf8ed5dceb44bff..d9df89392b8439587fad331f5a728255b7f9050a 100644
@@ -13,6 +13,8 @@
 
 #undef DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -174,7 +176,7 @@ static int __init maple_cpufreq_init(void)
        /* Get first CPU node */
        cpunode = of_cpu_device_node_get(0);
        if (cpunode == NULL) {
-               printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+               pr_err("Can't find any CPU 0 node\n");
                goto bail_noprops;
        }
 
@@ -182,8 +184,7 @@ static int __init maple_cpufreq_init(void)
        /* we actually don't care on which CPU to access PVR */
        pvr_hi = PVR_VER(mfspr(SPRN_PVR));
        if (pvr_hi != 0x3c && pvr_hi != 0x44) {
-               printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
-                               pvr_hi);
+               pr_err("Unsupported CPU version (%x)\n", pvr_hi);
                goto bail_noprops;
        }
 
@@ -222,8 +223,8 @@ static int __init maple_cpufreq_init(void)
        maple_pmode_cur = -1;
        maple_scom_switch_freq(maple_scom_query_freq());
 
-       printk(KERN_INFO "Registering Maple CPU frequency driver\n");
-       printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+       pr_info("Registering Maple CPU frequency driver\n");
+       pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
                maple_cpu_freqs[1].frequency/1000,
                maple_cpu_freqs[0].frequency/1000,
                maple_cpu_freqs[maple_pmode_cur].frequency/1000);
index 2058e6d292ce95fbcd6fb3099b08adf36f857cd1..6f602c7a71bd80fc6c8376574483e1bae788885f 100644
@@ -59,11 +59,8 @@ static LIST_HEAD(dvfs_info_list);
 static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
 {
        struct mtk_cpu_dvfs_info *info;
-       struct list_head *list;
-
-       list_for_each(list, &dvfs_info_list) {
-               info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
 
+       list_for_each_entry(info, &dvfs_info_list, list_head) {
                if (cpumask_test_cpu(cpu, &info->cpus))
                        return info;
        }
@@ -524,8 +521,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
 
 static int mt8173_cpufreq_probe(struct platform_device *pdev)
 {
-       struct mtk_cpu_dvfs_info *info;
-       struct list_head *list, *tmp;
+       struct mtk_cpu_dvfs_info *info, *tmp;
        int cpu, ret;
 
        for_each_possible_cpu(cpu) {
@@ -559,11 +555,9 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 release_dvfs_info_list:
-       list_for_each_safe(list, tmp, &dvfs_info_list) {
-               info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
-
+       list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
                mtk_cpu_dvfs_info_release(info);
-               list_del(list);
+               list_del(&info->list_head);
        }
 
        return ret;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
new file mode 100644
index 0000000..e920889
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * CPUFreq support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012-2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/resource.h>
+
+static int __init armada_xp_pmsu_cpufreq_init(void)
+{
+       struct device_node *np;
+       struct resource res;
+       int ret, cpu;
+
+       if (!of_machine_is_compatible("marvell,armadaxp"))
+               return 0;
+
+       /*
+        * In order to have proper cpufreq handling, we need to ensure
+        * that the Device Tree description of the CPU clock includes
+        * the definition of the PMU DFS registers. If not, we do not
+        * register the clock notifier and the cpufreq driver. This
+        * piece of code is only for compatibility with old Device
+        * Trees.
+        */
+       np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
+       if (!np)
+               return 0;
+
+       ret = of_address_to_resource(np, 1, &res);
+       if (ret) {
+               pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
+               of_node_put(np);
+               return 0;
+       }
+
+       of_node_put(np);
+
+       /*
+        * For each CPU, this loop registers the operating points
+        * supported (which are the nominal CPU frequency and half of
+        * it), and registers the clock notifier that will take care
+        * of doing the PMSU part of a frequency transition.
+        */
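+       /*
+        * Illustrative example (frequency assumed, not read from real
+        * hardware): a CPU clocked at 1600000000 Hz ends up with two
+        * OPPs, 1600000000 Hz and 800000000 Hz.
+        */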
+       for_each_possible_cpu(cpu) {
+               struct device *cpu_dev;
+               struct clk *clk;
+               int ret;
+
+               cpu_dev = get_cpu_device(cpu);
+               if (!cpu_dev) {
+                       pr_err("Cannot get CPU %d\n", cpu);
+                       continue;
+               }
+
+               clk = clk_get(cpu_dev, NULL);
+               if (IS_ERR(clk)) {
+                       pr_err("Cannot get clock for CPU %d\n", cpu);
+                       return PTR_ERR(clk);
+               }
+
+               /*
+                * In case of a failure of dev_pm_opp_add(), we don't
+                * bother with cleaning up the registered OPP (there's
+                * no function to do so), and simply cancel the
+                * registration of the cpufreq device.
+                */
+               ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
+               if (ret) {
+                       clk_put(clk);
+                       return ret;
+               }
+
+               ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
+               if (ret) {
+                       clk_put(clk);
+                       return ret;
+               }
+
+               ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
+                                                 cpumask_of(cpu_dev->id));
+               if (ret)
+                       dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+                               __func__, ret);
+       }
+
+       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+       return 0;
+}
+device_initcall(armada_xp_pmsu_cpufreq_init);
index e3866e0d5bf8ef8e9ee0bde64e1995792ada158b..cead9bec4843a18e0d564e4b5d3522d0681cd778 100644
@@ -13,6 +13,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -163,13 +166,13 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
 {
        mpu_dev = get_cpu_device(0);
        if (!mpu_dev) {
-               pr_warning("%s: unable to get the mpu device\n", __func__);
+               pr_warn("%s: unable to get the MPU device\n", __func__);
                return -EINVAL;
        }
 
        mpu_reg = regulator_get(mpu_dev, "vcc");
        if (IS_ERR(mpu_reg)) {
-               pr_warning("%s: unable to get MPU regulator\n", __func__);
+               pr_warn("%s: unable to get MPU regulator\n", __func__);
                mpu_reg = NULL;
        } else {
                /* 
index 5dd95dab580d1cf30135ff39ce2cfd81d9be9b7f..fd77812313f3ecd2ef485b6298141de322ce4d80 100644
@@ -20,6 +20,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -35,8 +37,6 @@
 
 #include "speedstep-lib.h"
 
-#define PFX    "p4-clockmod: "
-
 /*
  * Duty Cycle (3bits), note DC_DISABLE is not specified in
  * intel docs i just use it to mean disable
@@ -124,11 +124,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
                if (cpu_has(c, X86_FEATURE_EST))
-                       printk_once(KERN_WARNING PFX "Warning: EST-capable "
-                              "CPU detected. The acpi-cpufreq module offers "
-                              "voltage scaling in addition to frequency "
-                              "scaling. You should use that instead of "
-                              "p4-clockmod, if possible.\n");
+                       pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
                switch (c->x86_model) {
                case 0x0E: /* Core */
                case 0x0F: /* Core Duo */
@@ -152,11 +148,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
        p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 
        if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
-               printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
-                      "The speedstep-ich or acpi cpufreq modules offer "
-                      "voltage scaling in addition of frequency scaling. "
-                      "You should use either one instead of p4-clockmod, "
-                      "if possible.\n");
+               pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
                return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
        }
 
@@ -265,8 +257,7 @@ static int __init cpufreq_p4_init(void)
 
        ret = cpufreq_register_driver(&p4clockmod_driver);
        if (!ret)
-               printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
-                               "Modulation available\n");
+               pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
 
        return ret;
 }
index 1f49d97a70ea164737570c3480a2dcac30265788..b7b576e53e926a5c8e94bd614b3a60b4d1c6f222 100644
@@ -13,6 +13,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -481,13 +483,13 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
                freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
                lenp /= sizeof(u32);
                if (freqs == NULL || lenp != 2) {
-                       printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+                       pr_err("bus-frequencies incorrect or missing\n");
                        return 1;
                }
                ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
                                                NULL);
                if (ratio == NULL) {
-                       printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+                       pr_err("processor-to-bus-ratio*2 missing\n");
                        return 1;
                }
 
@@ -550,7 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
        if (volt_gpio_np)
                voltage_gpio = read_gpio(volt_gpio_np);
        if (!voltage_gpio){
-               printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+               pr_err("missing cpu-vcore-select gpio\n");
                return 1;
        }
 
@@ -675,9 +677,9 @@ out:
        pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
        ppc_proc_freq = cur_freq * 1000ul;
 
-       printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
-       printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
-              low_freq/1000, hi_freq/1000, cur_freq/1000);
+       pr_info("Registering PowerMac CPU frequency driver\n");
+       pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+               low_freq/1000, hi_freq/1000, cur_freq/1000);
 
        return cpufreq_register_driver(&pmac_cpufreq_driver);
 }
index 4ff86878727fc0130ebf9be394f3f832c50c315d..267e0894c62d08643d1cf34bef94fd54c95daeb8 100644
@@ -12,6 +12,8 @@
 
 #undef DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -138,7 +140,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
                usleep_range(1000, 1000);
        }
        if (done == 0)
-               printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+               pr_warn("Timeout in clock slewing !\n");
 }
 
 
@@ -266,7 +268,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
                rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
 
        if (rc)
-               printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+               pr_warn("pfunc switch error %d\n", rc);
 
        /* It's an irq GPIO so we should be able to just block here,
         * I'll do that later after I've properly tested the IRQ code for
@@ -282,7 +284,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
                usleep_range(500, 500);
        }
        if (done == 0)
-               printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+               pr_warn("Timeout in clock slewing !\n");
 
        /* If frequency is going down, last ramp the voltage */
        if (speed_mode > g5_pmode_cur)
@@ -368,7 +370,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
        }
        pvr_hi = (*valp) >> 16;
        if (pvr_hi != 0x3c && pvr_hi != 0x44) {
-               printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+               pr_err("Unsupported CPU version\n");
                goto bail_noprops;
        }
 
@@ -403,8 +405,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
 
                root = of_find_node_by_path("/");
                if (root == NULL) {
-                       printk(KERN_ERR "cpufreq: Can't find root of "
-                              "device tree\n");
+                       pr_err("Can't find root of device tree\n");
                        goto bail_noprops;
                }
                pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
@@ -412,8 +413,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
                        pmf_find_function(root, "slewing-done");
                if (pfunc_set_vdnap0 == NULL ||
                    pfunc_vdnap0_complete == NULL) {
-                       printk(KERN_ERR "cpufreq: Can't find required "
-                              "platform function\n");
+                       pr_err("Can't find required platform function\n");
                        goto bail_noprops;
                }
 
@@ -453,10 +453,10 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
        g5_pmode_cur = -1;
        g5_switch_freq(g5_query_freq());
 
-       printk(KERN_INFO "Registering G5 CPU frequency driver\n");
-       printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
-              freq_method, volt_method);
-       printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+       pr_info("Registering G5 CPU frequency driver\n");
+       pr_info("Frequency method: %s, Voltage method: %s\n",
+               freq_method, volt_method);
+       pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
                g5_cpu_freqs[1].frequency/1000,
                g5_cpu_freqs[0].frequency/1000,
                g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -493,7 +493,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        if (cpuid != NULL)
                eeprom = of_get_property(cpuid, "cpuid", NULL);
        if (eeprom == NULL) {
-               printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+               pr_err("Can't find cpuid EEPROM !\n");
                rc = -ENODEV;
                goto bail;
        }
@@ -511,7 +511,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
                break;
        }
        if (hwclock == NULL) {
-               printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+               pr_err("Can't find i2c clock chip !\n");
                rc = -ENODEV;
                goto bail;
        }
@@ -539,7 +539,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        /* Check we have minimum requirements */
        if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
            pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
-               printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+               pr_err("Can't find platform functions !\n");
                rc = -ENODEV;
                goto bail;
        }
@@ -567,7 +567,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        /* Get max frequency from device-tree */
        valp = of_get_property(cpunode, "clock-frequency", NULL);
        if (!valp) {
-               printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+               pr_err("Can't find CPU frequency !\n");
                rc = -ENODEV;
                goto bail;
        }
@@ -583,8 +583,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
 
        /* Check for machines with no useful settings */
        if (il == ih) {
-               printk(KERN_WARNING "cpufreq: No low frequency mode available"
-                      " on this model !\n");
+               pr_warn("No low frequency mode available on this model !\n");
                rc = -ENODEV;
                goto bail;
        }
@@ -595,7 +594,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
 
        /* Sanity check */
        if (min_freq >= max_freq || min_freq < 1000) {
-               printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+               pr_err("Can't calculate low frequency !\n");
                rc = -ENXIO;
                goto bail;
        }
@@ -619,10 +618,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        g5_pmode_cur = -1;
        g5_switch_freq(g5_query_freq());
 
-       printk(KERN_INFO "Registering G5 CPU frequency driver\n");
-       printk(KERN_INFO "Frequency method: i2c/pfunc, "
-              "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
-       printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+       pr_info("Registering G5 CPU frequency driver\n");
+       pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
+               has_volt ? "i2c/pfunc" : "none");
+       pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
                g5_cpu_freqs[1].frequency/1000,
                g5_cpu_freqs[0].frequency/1000,
                g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -654,7 +653,7 @@ static int __init g5_cpufreq_init(void)
        /* Get first CPU node */
        cpunode = of_cpu_device_node_get(0);
        if (cpunode == NULL) {
-               pr_err("cpufreq: Can't find any CPU node\n");
+               pr_err("Can't find any CPU node\n");
                return -ENODEV;
        }
 
index e6f24b281e3edcb5289202e5f149e88d9e5a92e6..dedd2568e8528290f622d52149d87d36dc164100 100644
@@ -8,6 +8,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -22,7 +24,6 @@
 #define POWERNOW_IOPORT 0xfff0          /* it doesn't matter where, as long
                                           as it is unused */
 
-#define PFX "powernow-k6: "
 static unsigned int                     busfreq;   /* FSB, in 10 kHz */
 static unsigned int                     max_multiplier;
 
@@ -141,7 +142,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
 {
 
        if (clock_ratio[best_i].driver_data > max_multiplier) {
-               printk(KERN_ERR PFX "invalid target frequency\n");
+               pr_err("invalid target frequency\n");
                return -EINVAL;
        }
 
@@ -175,13 +176,14 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
                                max_multiplier = param_max_multiplier;
                                goto have_max_multiplier;
                        }
-               printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+               pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
                return -EINVAL;
        }
 
        if (!max_multiplier) {
-               printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
-               printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+               pr_warn("unknown frequency %u, cannot determine current multiplier\n",
+                       khz);
+               pr_warn("use module parameters max_multiplier and bus_frequency\n");
                return -EOPNOTSUPP;
        }
 
@@ -193,7 +195,7 @@ have_max_multiplier:
                        busfreq = param_busfreq / 10;
                        goto have_busfreq;
                }
-               printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+               pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
                return -EINVAL;
        }
 
@@ -275,7 +277,7 @@ static int __init powernow_k6_init(void)
                return -ENODEV;
 
        if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
-               printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
+               pr_info("PowerNow IOPORT region already used\n");
                return -EIO;
        }
 
index c1ae1999770afbeb2d754601b7e520c198ae3510..9f013ed42977ef17d5606cebb38240a4aed03b15 100644
@@ -13,6 +13,8 @@
  *  - We disable half multipliers if ACPI is used on A0 stepping CPUs.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -35,9 +37,6 @@
 
 #include "powernow-k7.h"
 
-#define PFX "powernow: "
-
-
 struct psb_s {
        u8 signature[10];
        u8 tableversion;
@@ -127,14 +126,13 @@ static int check_powernow(void)
        maxei = cpuid_eax(0x80000000);
        if (maxei < 0x80000007) {       /* Any powernow info ? */
 #ifdef MODULE
-               printk(KERN_INFO PFX "No powernow capabilities detected\n");
+               pr_info("No powernow capabilities detected\n");
 #endif
                return 0;
        }
 
        if ((c->x86_model == 6) && (c->x86_mask == 0)) {
-               printk(KERN_INFO PFX "K7 660[A0] core detected, "
-                               "enabling errata workarounds\n");
+               pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
                have_a0 = 1;
        }
 
@@ -144,22 +142,22 @@ static int check_powernow(void)
        if (!(edx & (1 << 1 | 1 << 2)))
                return 0;
 
-       printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
+       pr_info("PowerNOW! Technology present. Can scale: ");
 
        if (edx & 1 << 1) {
-               printk("frequency");
+               pr_cont("frequency");
                can_scale_bus = 1;
        }
 
        if ((edx & (1 << 1 | 1 << 2)) == 0x6)
-               printk(" and ");
+               pr_cont(" and ");
 
        if (edx & 1 << 2) {
-               printk("voltage");
+               pr_cont("voltage");
                can_scale_vid = 1;
        }
 
-       printk(".\n");
+       pr_cont("\n");
        return 1;
 }
 
@@ -427,16 +425,14 @@ err1:
 err05:
        kfree(acpi_processor_perf);
 err0:
-       printk(KERN_WARNING PFX "ACPI perflib can not be used on "
-                       "this platform\n");
+       pr_warn("ACPI perflib can not be used on this platform\n");
        acpi_processor_perf = NULL;
        return retval;
 }
 #else
 static int powernow_acpi_init(void)
 {
-       printk(KERN_INFO PFX "no support for ACPI processor found."
-              "  Please recompile your kernel with ACPI processor\n");
+       pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
        return -EINVAL;
 }
 #endif
@@ -468,8 +464,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
                        psb = (struct psb_s *) p;
                        pr_debug("Table version: 0x%x\n", psb->tableversion);
                        if (psb->tableversion != 0x12) {
-                               printk(KERN_INFO PFX "Sorry, only v1.2 tables"
-                                               " supported right now\n");
+                               pr_info("Sorry, only v1.2 tables supported right now\n");
                                return -ENODEV;
                        }
 
@@ -481,10 +476,8 @@ static int powernow_decode_bios(int maxfid, int startvid)
 
                        latency = psb->settlingtime;
                        if (latency < 100) {
-                               printk(KERN_INFO PFX "BIOS set settling time "
-                                               "to %d microseconds. "
-                                               "Should be at least 100. "
-                                               "Correcting.\n", latency);
+                               pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
+                                       latency);
                                latency = 100;
                        }
                        pr_debug("Settling Time: %d microseconds.\n",
@@ -516,10 +509,9 @@ static int powernow_decode_bios(int maxfid, int startvid)
                                                p += 2;
                                }
                        }
-                       printk(KERN_INFO PFX "No PST tables match this cpuid "
-                                       "(0x%x)\n", etuple);
-                       printk(KERN_INFO PFX "This is indicative of a broken "
-                                       "BIOS.\n");
+                       pr_info("No PST tables match this cpuid (0x%x)\n",
+                               etuple);
+                       pr_info("This is indicative of a broken BIOS\n");
 
                        return -EINVAL;
                }
@@ -552,7 +544,7 @@ static int fixup_sgtc(void)
        sgtc = 100 * m * latency;
        sgtc = sgtc / 3;
        if (sgtc > 0xfffff) {
-               printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
+               pr_warn("SGTC too large %d\n", sgtc);
                sgtc = 0xfffff;
        }
        return sgtc;
@@ -574,14 +566,10 @@ static unsigned int powernow_get(unsigned int cpu)
 
 static int acer_cpufreq_pst(const struct dmi_system_id *d)
 {
-       printk(KERN_WARNING PFX
-               "%s laptop with broken PST tables in BIOS detected.\n",
+       pr_warn("%s laptop with broken PST tables in BIOS detected\n",
                d->ident);
-       printk(KERN_WARNING PFX
-               "You need to downgrade to 3A21 (09/09/2002), or try a newer "
-               "BIOS than 3A71 (01/20/2003)\n");
-       printk(KERN_WARNING PFX
-               "cpufreq scaling has been disabled as a result of this.\n");
+       pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
+       pr_warn("cpufreq scaling has been disabled as a result of this\n");
        return 0;
 }
 
@@ -616,40 +604,38 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
 
        fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
        if (!fsb) {
-               printk(KERN_WARNING PFX "can not determine bus frequency\n");
+               pr_warn("can not determine bus frequency\n");
                return -EINVAL;
        }
        pr_debug("FSB: %3dMHz\n", fsb/1000);
 
        if (dmi_check_system(powernow_dmi_table) || acpi_force) {
-               printk(KERN_INFO PFX "PSB/PST known to be broken.  "
-                               "Trying ACPI instead\n");
+               pr_info("PSB/PST known to be broken - trying ACPI instead\n");
                result = powernow_acpi_init();
        } else {
                result = powernow_decode_bios(fidvidstatus.bits.MFID,
                                fidvidstatus.bits.SVID);
                if (result) {
-                       printk(KERN_INFO PFX "Trying ACPI perflib\n");
+                       pr_info("Trying ACPI perflib\n");
                        maximum_speed = 0;
                        minimum_speed = -1;
                        latency = 0;
                        result = powernow_acpi_init();
                        if (result) {
-                               printk(KERN_INFO PFX
-                                       "ACPI and legacy methods failed\n");
+                               pr_info("ACPI and legacy methods failed\n");
                        }
                } else {
                        /* SGTC use the bus clock as timer */
                        latency = fixup_sgtc();
-                       printk(KERN_INFO PFX "SGTC: %d\n", latency);
+                       pr_info("SGTC: %d\n", latency);
                }
        }
 
        if (result)
                return result;
 
-       printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
-                               minimum_speed/1000, maximum_speed/1000);
+       pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
+               minimum_speed/1000, maximum_speed/1000);
 
        policy->cpuinfo.transition_latency =
                cpufreq_scale(2000000UL, fsb, latency);
index 39ac78c94be0f3c191aaf5868cd3c2154ab5fb59..54c45368e3f17d3333a124cbb378ccf0fefee9fe 100644
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 #include <asm/opal.h>
+#include <linux/timer.h>
 
 #define POWERNV_MAX_PSTATES    256
 #define PMSR_PSAFE_ENABLE      (1UL << 30)
 #define PMSR_SPR_EM_DISABLE    (1UL << 31)
 #define PMSR_MAX(x)            ((x >> 32) & 0xFF)
 
+#define MAX_RAMP_DOWN_TIME                             5120
+/*
+ * On an idle system we want the global pstate to ramp down from the max
+ * value to the min over a span of ~5 seconds. We also want the ramp-down
+ * to start slowly and speed up later on.
+ *
+ * This gives the ramp-down percentage for the time elapsed in milliseconds:
+ * ramp_down_percentage = ((ms * ms) >> 18)
+ *                     ~= 3.8 * (sec * sec)
+ *
+ * At 0 ms     ramp_down_percent = 0
+ * At 5120 ms  ramp_down_percent = 100
+ */
+#define ramp_down_percent(time)                (((time) * (time)) >> 18)
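+/*
+ * Worked example of the curve above (illustrative): at the halfway point,
+ * 2560 ms, (2560 * 2560) >> 18 = 25, i.e. only ~25% of the ramp-down has
+ * happened, so most of the drop occurs in the second half of the window.
+ */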
+
+/* Interval after which the timer is queued to bring down global pstate */
+#define GPSTATE_TIMER_INTERVAL                         2000
+
+/**
+ * struct global_pstate_info - Per policy data structure to maintain history of
+ *                             global pstates
+ * @highest_lpstate:           The local pstate from which we are ramping down
+ * @elapsed_time:              Time in ms spent in ramping down from
+ *                             highest_lpstate
+ * @last_sampled_time:         Time from boot in ms when global pstates were
+ *                             last set
+ * @last_lpstate:              Last set value of the local pstate
+ * @last_gpstate:              Last set value of the global pstate
+ * @timer:                     Used for ramping down if the CPU goes idle for
+ *                             a long time with the global pstate held high
+ * @gpstate_lock:              A spinlock to maintain synchronization between
+ *                             routines called by the timer handler and
+ *                             governor's target_index calls
+ */
+struct global_pstate_info {
+       int highest_lpstate;
+       unsigned int elapsed_time;
+       unsigned int last_sampled_time;
+       int last_lpstate;
+       int last_gpstate;
+       spinlock_t gpstate_lock;
+       struct timer_list timer;
+};
+
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
 
@@ -94,6 +138,17 @@ static struct powernv_pstate_info {
        int nr_pstates;
 } powernv_pstate_info;
 
+static inline void reset_gpstates(struct cpufreq_policy *policy)
+{
+       struct global_pstate_info *gpstates = policy->driver_data;
+
+       gpstates->highest_lpstate = 0;
+       gpstates->elapsed_time = 0;
+       gpstates->last_sampled_time = 0;
+       gpstates->last_lpstate = 0;
+       gpstates->last_gpstate = 0;
+}
+
 /*
  * Initialize the freq table based on data obtained
  * from the firmware passed via device-tree
@@ -285,6 +340,7 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
 struct powernv_smp_call_data {
        unsigned int freq;
        int pstate_id;
+       int gpstate_id;
 };
 
 /*
@@ -343,19 +399,21 @@ static unsigned int powernv_cpufreq_get(unsigned int cpu)
  * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
  * on this CPU should be present in freq_data->pstate_id.
  */
-static void set_pstate(void *freq_data)
+static void set_pstate(void *data)
 {
        unsigned long val;
-       unsigned long pstate_ul =
-               ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+       struct powernv_smp_call_data *freq_data = data;
+       unsigned long pstate_ul = freq_data->pstate_id;
+       unsigned long gpstate_ul = freq_data->gpstate_id;
 
        val = get_pmspr(SPRN_PMCR);
        val = val & 0x0000FFFFFFFFFFFFULL;
 
        pstate_ul = pstate_ul & 0xFF;
+       gpstate_ul = gpstate_ul & 0xFF;
 
        /* Set both global(bits 56..63) and local(bits 48..55) PStates */
-       val = val | (pstate_ul << 56) | (pstate_ul << 48);
+       val = val | (gpstate_ul << 56) | (pstate_ul << 48);
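+       /*
+        * Illustrative example (values assumed): a global pstate of -16
+        * masks to 0xF0 and lands in bits 56..63, while a local pstate of
+        * -8 masks to 0xF8 and lands in bits 48..55.
+        */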
 
        pr_debug("Setting cpu %d pmcr to %016lX\n",
                        raw_smp_processor_id(), val);
@@ -424,6 +482,111 @@ next:
        }
 }
 
+/**
+ * calc_global_pstate - Calculate global pstate
+ * @elapsed_time:      Elapsed time in milliseconds
+ * @local_pstate:      New local pstate
+ * @highest_lpstate:   pstate from which it is ramping down
+ *
+ * Finds the appropriate global pstate based on the pstate from which it is
+ * ramping down and the time elapsed so far. It follows a quadratic equation
+ * which ensures that the ramp-down reaches pmin within 5 seconds.
+ */
+static inline int calc_global_pstate(unsigned int elapsed_time,
+                                    int highest_lpstate, int local_pstate)
+{
+       int pstate_diff;
+
+       /*
+        * ramp_down_percent gives the percentage of the ramp-down we
+        * expect to have completed. The difference between highest_lpstate
+        * and powernv_pstate_info.min gives an absolute number of pstates
+        * that will have been dropped by the end of 5 seconds; scale it to
+        * get the number of pstates to drop now.
+        */
+       pstate_diff =  ((int)ramp_down_percent(elapsed_time) *
+                       (highest_lpstate - powernv_pstate_info.min)) / 100;
+
+       /* Ensure that the global pstate is >= the local pstate */
+       if (highest_lpstate - pstate_diff < local_pstate)
+               return local_pstate;
+       else
+               return highest_lpstate - pstate_diff;
+}
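+
+/*
+ * Worked example for calc_global_pstate() (pstate values assumed): with
+ * highest_lpstate = 0, powernv_pstate_info.min = -48 and elapsed_time =
+ * 2560 ms, ramp_down_percent() is 25, so pstate_diff = (25 * 48) / 100 = 12
+ * and the returned global pstate is -12, unless the local pstate is higher.
+ */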
+
+static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
+{
+       unsigned int timer_interval;
+
+       /*
+        * Set the timer to fire after GPSTATE_TIMER_INTERVAL ms. If that
+        * would push the total ramp-down time past MAX_RAMP_DOWN_TIME ms,
+        * set it so that it fires exactly when MAX_RAMP_DOWN_TIME ms of
+        * ramp-down time have elapsed instead.
+        */
+       if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
+            > MAX_RAMP_DOWN_TIME)
+               timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
+       else
+               timer_interval = GPSTATE_TIMER_INTERVAL;
+
+       mod_timer_pinned(&gpstates->timer, jiffies +
+                       msecs_to_jiffies(timer_interval));
+}
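+
+/*
+ * For example (illustrative): with elapsed_time = 4000 ms the next interval
+ * is clamped to 5120 - 4000 = 1120 ms rather than the usual 2000 ms, so the
+ * timer fires exactly at the MAX_RAMP_DOWN_TIME mark.
+ */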
+
+/**
+ * gpstate_timer_handler - bring the global pstate closer to the local pstate
+ *
+ * @data: pointer to the cpufreq_policy on which the timer was queued
+ *
+ * This handler brings the global pstate closer to the local pstate
+ * according to the quadratic equation above. It queues a new timer if the
+ * global pstate is still not equal to the local pstate.
+ */
+void gpstate_timer_handler(unsigned long data)
+{
+       struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+       struct global_pstate_info *gpstates = policy->driver_data;
+       int gpstate_id;
+       unsigned int time_diff = jiffies_to_msecs(jiffies)
+                                       - gpstates->last_sampled_time;
+       struct powernv_smp_call_data freq_data;
+
+       if (!spin_trylock(&gpstates->gpstate_lock))
+               return;
+
+       gpstates->last_sampled_time += time_diff;
+       gpstates->elapsed_time += time_diff;
+       freq_data.pstate_id = gpstates->last_lpstate;
+
+       if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+           (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+               gpstate_id = freq_data.pstate_id;
+               reset_gpstates(policy);
+               gpstates->highest_lpstate = freq_data.pstate_id;
+       } else {
+               gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+                                               gpstates->highest_lpstate,
+                                               freq_data.pstate_id);
+       }
+
+       /*
+        * If the local pstate equals the global pstate, the ramp-down is
+        * over, so no new timer needs to be queued.
+        */
+       if (gpstate_id != freq_data.pstate_id)
+               queue_gpstate_timer(gpstates);
+
+       freq_data.gpstate_id = gpstate_id;
+       gpstates->last_gpstate = freq_data.gpstate_id;
+       gpstates->last_lpstate = freq_data.pstate_id;
+
+       spin_unlock(&gpstates->gpstate_lock);
+
+       /* Timer may get migrated to a different cpu on cpu hot unplug */
+       smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+}
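+
+/*
+ * Note: if the trylock above fails (a concurrent target_index call holds
+ * the lock), this timer invocation simply returns without re-queuing
+ * itself; the concurrent update queues a fresh timer if any ramp-down is
+ * still pending.
+ */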
+
 /*
  * powernv_cpufreq_target_index: Sets the frequency corresponding to
  * the cpufreq table entry indexed by new_index on the cpus in the
@@ -433,6 +596,8 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
                                        unsigned int new_index)
 {
        struct powernv_smp_call_data freq_data;
+       unsigned int cur_msec, gpstate_id;
+       struct global_pstate_info *gpstates = policy->driver_data;
 
        if (unlikely(rebooting) && new_index != get_nominal_index())
                return 0;
@@ -440,28 +605,81 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
        if (!throttled)
                powernv_cpufreq_throttle_check(NULL);
 
+       cur_msec = jiffies_to_msecs(get_jiffies_64());
+
+       spin_lock(&gpstates->gpstate_lock);
        freq_data.pstate_id = powernv_freqs[new_index].driver_data;
 
+       if (!gpstates->last_sampled_time) {
+               gpstate_id = freq_data.pstate_id;
+               gpstates->highest_lpstate = freq_data.pstate_id;
+               goto gpstates_done;
+       }
+
+       if (gpstates->last_gpstate > freq_data.pstate_id) {
+               gpstates->elapsed_time += cur_msec -
+                                                gpstates->last_sampled_time;
+
+               /*
+                * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
+                * reset all global pstate related data and set it equal to the
+                * local pstate to start fresh.
+                */
+               if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+                       reset_gpstates(policy);
+                       gpstates->highest_lpstate = freq_data.pstate_id;
+                       gpstate_id = freq_data.pstate_id;
+               } else {
+                       /* Elapsed time is under 5 seconds; continue the ramp-down */
+                       gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+                                                       gpstates->highest_lpstate,
+                                                       freq_data.pstate_id);
+               }
+       } else {
+               reset_gpstates(policy);
+               gpstates->highest_lpstate = freq_data.pstate_id;
+               gpstate_id = freq_data.pstate_id;
+       }
+
+       /*
+        * If the local pstate equals the global pstate, the ramp-down is
+        * over, so no new timer needs to be queued.
+        */
+       if (gpstate_id != freq_data.pstate_id)
+               queue_gpstate_timer(gpstates);
+       else
+               del_timer_sync(&gpstates->timer);
+
+gpstates_done:
+       freq_data.gpstate_id = gpstate_id;
+       gpstates->last_sampled_time = cur_msec;
+       gpstates->last_gpstate = freq_data.gpstate_id;
+       gpstates->last_lpstate = freq_data.pstate_id;
+
+       spin_unlock(&gpstates->gpstate_lock);
+
        /*
         * Use smp_call_function to send IPI and execute the
         * mtspr on target CPU.  We could do that without IPI
         * if current CPU is within policy->cpus (core)
         */
        smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
-
        return 0;
 }
 
 static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       int base, i;
+       int base, i, ret;
+       struct kernfs_node *kn;
+       struct global_pstate_info *gpstates;
 
        base = cpu_first_thread_sibling(policy->cpu);
 
        for (i = 0; i < threads_per_core; i++)
                cpumask_set_cpu(base + i, policy->cpus);
 
-       if (!policy->driver_data) {
+       kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
+       if (!kn) {
                int ret;
 
                ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
@@ -470,13 +688,37 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
                                policy->cpu);
                        return ret;
                }
-               /*
-                * policy->driver_data is used as a flag for one-time
-                * creation of throttle sysfs files.
-                */
-               policy->driver_data = policy;
+       } else {
+               kernfs_put(kn);
        }
-       return cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+       gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
+       if (!gpstates)
+               return -ENOMEM;
+
+       policy->driver_data = gpstates;
+
+       /* initialize timer */
+       init_timer_deferrable(&gpstates->timer);
+       gpstates->timer.data = (unsigned long)policy;
+       gpstates->timer.function = gpstate_timer_handler;
+       gpstates->timer.expires = jiffies +
+                               msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
+       spin_lock_init(&gpstates->gpstate_lock);
+       ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+       if (ret < 0)
+               kfree(policy->driver_data);
+
+       return ret;
+}
+
+static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+       /* timer is deleted in cpufreq_cpu_stop() */
+       kfree(policy->driver_data);
+
+       return 0;
 }
 
 static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
@@ -604,15 +846,19 @@ static struct notifier_block powernv_cpufreq_opal_nb = {
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
        struct powernv_smp_call_data freq_data;
+       struct global_pstate_info *gpstates = policy->driver_data;
 
        freq_data.pstate_id = powernv_pstate_info.min;
+       freq_data.gpstate_id = powernv_pstate_info.min;
        smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+       del_timer_sync(&gpstates->timer);
 }
 
 static struct cpufreq_driver powernv_cpufreq_driver = {
        .name           = "powernv-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = powernv_cpufreq_cpu_init,
+       .exit           = powernv_cpufreq_cpu_exit,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = powernv_cpufreq_target_index,
        .get            = powernv_cpufreq_get,
index b4c00a5a6a597a94035b9c7cdb0652c0513edf63..3eace725ccd6c6c20c8db9abfaf139b15d41c3cb 100644
@@ -17,7 +17,7 @@ int cbe_cpufreq_get_pmode(int cpu);
 
 int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
 
-#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
 extern bool cbe_cpufreq_has_pmi;
 #else
 #define cbe_cpufreq_has_pmi (0)
index 7969f7690498b69e04403dda45df6d11ea3b1aa1..7c4cd5c634f23006eaba8f253c04e27b93ed91e3 100644
@@ -23,7 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/timer.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_platform.h>
 
 #include <asm/processor.h>
@@ -142,15 +142,4 @@ static int __init cbe_cpufreq_pmi_init(void)
 
        return 0;
 }
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
-       cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
-       pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+device_initcall(cbe_cpufreq_pmi_init);
index 46fee1539cc87d3eeada498408090a2ebad77875..ce345bf34d5ddfec4f494deb84ca35021c088c0a 100644
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -186,8 +188,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 
        ret = regulator_set_voltage(vcc_core, vmin, vmax);
        if (ret)
-               pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
-                      vmin, vmax);
+               pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
        return ret;
 }
 
@@ -195,10 +196,10 @@ static void __init pxa_cpufreq_init_voltages(void)
 {
        vcc_core = regulator_get(NULL, "vcc_core");
        if (IS_ERR(vcc_core)) {
-               pr_info("cpufreq: Didn't find vcc_core regulator\n");
+               pr_info("Didn't find vcc_core regulator\n");
                vcc_core = NULL;
        } else {
-               pr_info("cpufreq: Found vcc_core regulator\n");
+               pr_info("Found vcc_core regulator\n");
        }
 }
 #else
@@ -233,9 +234,8 @@ static void pxa27x_guess_max_freq(void)
 {
        if (!pxa27x_maxfreq) {
                pxa27x_maxfreq = 416000;
-               printk(KERN_INFO "PXA CPU 27x max frequency not defined "
-                      "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
-                      pxa27x_maxfreq);
+               pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+                       pxa27x_maxfreq);
        } else {
                pxa27x_maxfreq *= 1000;
        }
@@ -408,7 +408,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
         */
        if (cpu_is_pxa25x()) {
                find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
-               pr_info("PXA255 cpufreq using %s frequency table\n",
+               pr_info("using %s frequency table\n",
                        pxa255_turbo_table ? "turbo" : "run");
 
                cpufreq_table_validate_and_show(policy, pxa255_freq_table);
@@ -417,7 +417,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
                cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
        }
 
-       printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+       pr_info("frequency change support initialized\n");
 
        return 0;
 }
index b23e525a7af3fc21ad9fa85be59d3cb6a3ba27ba..53d8c3fb16f67bfc5a4cba5065c32a5503be4328 100644
@@ -301,10 +301,11 @@ err_np:
        return -ENODEV;
 }
 
-static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
        struct cpu_data *data = policy->driver_data;
 
+       cpufreq_cooling_unregister(data->cdev);
        kfree(data->pclk);
        kfree(data->table);
        kfree(data);
@@ -333,8 +334,8 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
                cpud->cdev = of_cpufreq_cooling_register(np,
                                                         policy->related_cpus);
 
-               if (IS_ERR(cpud->cdev)) {
-                       pr_err("Failed to register cooling device cpu%d: %ld\n",
+               if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
+                       pr_err("cpu%d is not running as cooling device: %ld\n",
                                        policy->cpu, PTR_ERR(cpud->cdev));
 
                        cpud->cdev = NULL;
@@ -348,7 +349,7 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
        .name           = "qoriq_cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = qoriq_cpufreq_cpu_init,
-       .exit           = __exit_p(qoriq_cpufreq_cpu_exit),
+       .exit           = qoriq_cpufreq_cpu_exit,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = qoriq_cpufreq_target,
        .get            = cpufreq_generic_get,
index eb262133fef25e31b261edeaf19725a34746759d..b04b6f02bbdcce4382bf9318b072931b43d105c9 100644
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -197,21 +199,20 @@ static int s3c2412_cpufreq_add(struct device *dev,
 
        hclk = clk_get(NULL, "hclk");
        if (IS_ERR(hclk)) {
-               printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+               pr_err("cannot find hclk clock\n");
                return -ENOENT;
        }
 
        fclk = clk_get(NULL, "fclk");
        if (IS_ERR(fclk)) {
-               printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+               pr_err("cannot find fclk clock\n");
                goto err_fclk;
        }
 
        fclk_rate = clk_get_rate(fclk);
        if (fclk_rate > 200000000) {
-               printk(KERN_INFO
-                      "%s: fclk %ld MHz, assuming 266MHz capable part\n",
-                      __func__, fclk_rate / 1000000);
+               pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
+                       fclk_rate / 1000000);
                s3c2412_cpufreq_info.max.fclk = 266000000;
                s3c2412_cpufreq_info.max.hclk = 133000000;
                s3c2412_cpufreq_info.max.pclk =  66000000;
@@ -219,13 +220,13 @@ static int s3c2412_cpufreq_add(struct device *dev,
 
        armclk = clk_get(NULL, "armclk");
        if (IS_ERR(armclk)) {
-               printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+               pr_err("cannot find arm clock\n");
                goto err_armclk;
        }
 
        xtal = clk_get(NULL, "xtal");
        if (IS_ERR(xtal)) {
-               printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+               pr_err("cannot find xtal clock\n");
                goto err_xtal;
        }
 
index 0129f5c70a610b941e14bc5f8912681b0f7bc3f7..d0d75b65ddd6df2a070b162c178322e48fbf1239 100644 (file)
@@ -11,6 +11,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -66,7 +68,7 @@ static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
                     __func__, fclk, armclk, hclk_max);
 
        if (armclk > fclk) {
-               printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+               pr_warn("%s: armclk > fclk\n", __func__);
                armclk = fclk;
        }
 
@@ -273,7 +275,7 @@ static int s3c2440_cpufreq_add(struct device *dev,
        armclk = s3c_cpufreq_clk_get(NULL, "armclk");
 
        if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
-               printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+               pr_err("%s: failed to get clocks\n", __func__);
                return -ENOENT;
        }
 
index 9b7b4289d66cb4c43e692410272acd74d9fd05f8..4d976e8dbb2f4135819a19bbf4ab4b510f8f3d4d 100644 (file)
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
@@ -178,7 +180,7 @@ static int __init s3c_freq_debugfs_init(void)
 {
        dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
        if (IS_ERR(dbgfs_root)) {
-               printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+               pr_err("%s: error creating debugfs root\n", __func__);
                return PTR_ERR(dbgfs_root);
        }
 
index 68ef8fd9482fd2532762789973b8d4dfa98cfdd5..ae8eaed77b70748e961aaac9f4fb4b8e7ff6447d 100644 (file)
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -175,7 +177,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
        cpu_new.freq.fclk = cpu_new.pll.frequency;
 
        if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
-               printk(KERN_ERR "no divisors for %d\n", target_freq);
+               pr_err("no divisors for %d\n", target_freq);
                goto err_notpossible;
        }
 
@@ -187,7 +189,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
 
        if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
                if (s3c_cpufreq_calcio(&cpu_new) < 0) {
-                       printk(KERN_ERR "%s: no IO timings\n", __func__);
+                       pr_err("%s: no IO timings\n", __func__);
                        goto err_notpossible;
                }
        }
@@ -262,7 +264,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
        return 0;
 
  err_notpossible:
-       printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+       pr_err("no compatible settings for %d\n", target_freq);
        return -EINVAL;
 }
 
@@ -331,7 +333,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
                                                     &index);
 
                if (ret < 0) {
-                       printk(KERN_ERR "%s: no PLL available\n", __func__);
+                       pr_err("%s: no PLL available\n", __func__);
                        goto err_notpossible;
                }
 
@@ -346,7 +348,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
        return s3c_cpufreq_settarget(policy, target_freq, pll);
 
  err_notpossible:
-       printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+       pr_err("no compatible settings for %d\n", target_freq);
        return -EINVAL;
 }
 
@@ -356,7 +358,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 
        clk = clk_get(dev, name);
        if (IS_ERR(clk))
-               printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+               pr_err("failed to get clock '%s'\n", name);
 
        return clk;
 }
@@ -378,15 +380,16 @@ static int __init s3c_cpufreq_initclks(void)
 
        if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
            IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
-               printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+               pr_err("%s: could not get clock(s)\n", __func__);
                return -ENOENT;
        }
 
-       printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
-              clk_get_rate(clk_fclk) / 1000,
-              clk_get_rate(clk_hclk) / 1000,
-              clk_get_rate(clk_pclk) / 1000,
-              clk_get_rate(clk_arm) / 1000);
+       pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
+               __func__,
+               clk_get_rate(clk_fclk) / 1000,
+               clk_get_rate(clk_hclk) / 1000,
+               clk_get_rate(clk_pclk) / 1000,
+               clk_get_rate(clk_arm) / 1000);
 
        return 0;
 }
@@ -424,7 +427,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
        ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
        if (ret) {
-               printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+               pr_err("%s: failed to reset pll/freq\n", __func__);
                return ret;
        }
 
@@ -449,13 +452,12 @@ static struct cpufreq_driver s3c24xx_driver = {
 int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
        if (!info || !info->name) {
-               printk(KERN_ERR "%s: failed to pass valid information\n",
-                      __func__);
+               pr_err("%s: failed to pass valid information\n", __func__);
                return -EINVAL;
        }
 
-       printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
-              info->name);
+       pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
+               info->name);
 
        /* check our driver info has valid data */
 
@@ -478,7 +480,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
        struct s3c_cpufreq_board *ours;
 
        if (!board) {
-               printk(KERN_INFO "%s: no board data\n", __func__);
+               pr_info("%s: no board data\n", __func__);
                return -EINVAL;
        }
 
@@ -487,7 +489,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
 
        ours = kzalloc(sizeof(*ours), GFP_KERNEL);
        if (ours == NULL) {
-               printk(KERN_ERR "%s: no memory\n", __func__);
+               pr_err("%s: no memory\n", __func__);
                return -ENOMEM;
        }
 
@@ -502,15 +504,15 @@ static int __init s3c_cpufreq_auto_io(void)
        int ret;
 
        if (!cpu_cur.info->get_iotiming) {
-               printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+               pr_err("%s: get_iotiming undefined\n", __func__);
                return -ENOENT;
        }
 
-       printk(KERN_INFO "%s: working out IO settings\n", __func__);
+       pr_info("%s: working out IO settings\n", __func__);
 
        ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
        if (ret)
-               printk(KERN_ERR "%s: failed to get timings\n", __func__);
+               pr_err("%s: failed to get timings\n", __func__);
 
        return ret;
 }
@@ -561,7 +563,7 @@ static void s3c_cpufreq_update_loctkime(void)
        val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
        val |= calc_locktime(rate, cpu_cur.info->locktime_m);
 
-       printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+       pr_info("%s: new locktime is 0x%08x\n", __func__, val);
        __raw_writel(val, S3C2410_LOCKTIME);
 }
 
@@ -580,7 +582,7 @@ static int s3c_cpufreq_build_freq(void)
 
        ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
        if (!ftab) {
-               printk(KERN_ERR "%s: no memory for tables\n", __func__);
+               pr_err("%s: no memory for tables\n", __func__);
                return -ENOMEM;
        }
 
@@ -608,15 +610,14 @@ static int __init s3c_cpufreq_initcall(void)
                if (cpu_cur.board->auto_io) {
                        ret = s3c_cpufreq_auto_io();
                        if (ret) {
-                               printk(KERN_ERR "%s: failed to get io timing\n",
+                               pr_err("%s: failed to get io timing\n",
                                       __func__);
                                goto out;
                        }
                }
 
                if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
-                       printk(KERN_ERR "%s: no IO support registered\n",
-                              __func__);
+                       pr_err("%s: no IO support registered\n", __func__);
                        ret = -EINVAL;
                        goto out;
                }
@@ -666,9 +667,9 @@ int s3c_plltab_register(struct cpufreq_frequency_table *plls,
                vals += plls_no;
                vals->frequency = CPUFREQ_TABLE_END;
 
-               printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+               pr_info("%d PLL entries\n", plls_no);
        } else
-               printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+               pr_err("no memory for PLL tables\n");
 
        return vals ? 0 : -ENOMEM;
 }
index a145b319d1717a996fb193dc3f382a22030d5327..06d85917b6d5d34f5e0aa862f3afd23dbedbe75a 100644 (file)
@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -205,7 +207,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
        } else if (ch == DMC1) {
                reg = (dmc_base[1] + 0x30);
        } else {
-               printk(KERN_ERR "Cannot find DMC port\n");
+               pr_err("Cannot find DMC port\n");
                return;
        }
 
@@ -534,7 +536,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
        mem_type = check_mem_type(dmc_base[0]);
 
        if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
-               printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+               pr_err("CPUFreq doesn't support this memory type\n");
                ret = -EINVAL;
                goto out_dmc1;
        }
@@ -635,13 +637,13 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 
        arm_regulator = regulator_get(NULL, "vddarm");
        if (IS_ERR(arm_regulator)) {
-               pr_err("failed to get regulator vddarm");
+               pr_err("failed to get regulator vddarm\n");
                return PTR_ERR(arm_regulator);
        }
 
        int_regulator = regulator_get(NULL, "vddint");
        if (IS_ERR(int_regulator)) {
-               pr_err("failed to get regulator vddint");
+               pr_err("failed to get regulator vddint\n");
                regulator_put(arm_regulator);
                return PTR_ERR(int_regulator);
        }
index ac84e48180148b9a383b81fe43e328e7a56cd8cb..4225501a4b785a1a9f0c76d98fca0db78fd51e41 100644 (file)
@@ -13,6 +13,8 @@
  *     2005-03-30: - initial revision
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -30,8 +32,6 @@
 
 static __u8 __iomem *cpuctl;
 
-#define PFX "sc520_freq: "
-
 static struct cpufreq_frequency_table sc520_freq_table[] = {
        {0, 0x01,       100000},
        {0, 0x02,       133000},
@@ -44,8 +44,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
 
        switch (clockspeed_reg & 0x03) {
        default:
-               printk(KERN_ERR PFX "error: cpuctl register has unexpected "
-                               "value %02x\n", clockspeed_reg);
+               pr_err("error: cpuctl register has unexpected value %02x\n",
+                      clockspeed_reg);
        case 0x01:
                return 100000;
        case 0x02:
@@ -112,7 +112,7 @@ static int __init sc520_freq_init(void)
 
        cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
        if (!cpuctl) {
-               printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+               pr_err("sc520_freq: error: failed to remap memory\n");
                return -ENOMEM;
        }
 
index de5e89b2eaaa3db13413eec5762620c21623738c..e8a7bf57b31b1033dc45f3141f24e23f64642c67 100644 (file)
@@ -18,6 +18,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -38,10 +39,20 @@ static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
        return scpi_ops->dvfs_get_info(domain);
 }
 
-static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
+static int scpi_get_transition_latency(struct device *cpu_dev)
 {
-       int idx, ret = 0;
+       struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
+
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+       return info->latency;
+}
+
+static int scpi_init_opp_table(const struct cpumask *cpumask)
+{
+       int idx, ret;
        struct scpi_opp *opp;
+       struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
        struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
 
        if (IS_ERR(info))
@@ -51,11 +62,7 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
                return -EIO;
 
        for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
-               if (remove)
-                       dev_pm_opp_remove(cpu_dev, opp->freq);
-               else
-                       ret = dev_pm_opp_add(cpu_dev, opp->freq,
-                                            opp->m_volt * 1000);
+               ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
                if (ret) {
                        dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
                                 opp->freq, opp->m_volt);
@@ -64,33 +71,19 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
                        return ret;
                }
        }
-       return ret;
-}
 
-static int scpi_get_transition_latency(struct device *cpu_dev)
-{
-       struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
-
-       if (IS_ERR(info))
-               return PTR_ERR(info);
-       return info->latency;
-}
-
-static int scpi_init_opp_table(struct device *cpu_dev)
-{
-       return scpi_opp_table_ops(cpu_dev, false);
-}
-
-static void scpi_free_opp_table(struct device *cpu_dev)
-{
-       scpi_opp_table_ops(cpu_dev, true);
+       ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
+       if (ret)
+               dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+                       __func__, ret);
+       return ret;
 }
 
 static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
        .name   = "scpi",
        .get_transition_latency = scpi_get_transition_latency,
        .init_opp_table = scpi_init_opp_table,
-       .free_opp_table = scpi_free_opp_table,
+       .free_opp_table = dev_pm_opp_cpumask_remove_table,
 };
 
 static int scpi_cpufreq_probe(struct platform_device *pdev)
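
The arm_big_little init_opp_table hook now takes the policy's cpumask instead of a single CPU device, and table removal is delegated to the generic dev_pm_opp_cpumask_remove_table() helper. The key addition is dev_pm_opp_set_sharing_cpus(), which records that every CPU in the mask shares one OPP table. A minimal sketch with a made-up frequency/voltage pair:

    static int example_init_opp_table(const struct cpumask *cpumask)
    {
            struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
            int ret;

            /* hypothetical single OPP: 1 GHz at 1.1 V (microvolts) */
            ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
            if (ret)
                    return ret;

            /* mark all CPUs in the mask as sharing this table */
            return dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
    }
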
index 7d4a31571608524e75cfa71c35c5336dea485efa..41bc5397f4bbb3d0a08bcab5c3a75ad976680554 100644 (file)
@@ -13,6 +13,8 @@
  * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -27,7 +29,6 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
 
-#define PFX            "speedstep-centrino: "
 #define MAINTAINER     "linux-pm@vger.kernel.org"
 
 #define INTEL_MSR_RANGE        (0xffff)
@@ -386,8 +387,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                /* check to see if it stuck */
                rdmsr(MSR_IA32_MISC_ENABLE, l, h);
                if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
-                       printk(KERN_INFO PFX
-                               "couldn't enable Enhanced SpeedStep\n");
+                       pr_info("couldn't enable Enhanced SpeedStep\n");
                        return -ENODEV;
                }
        }
index 37555c6b86a7cf843f04187a24c86dfe99aa8d42..b86953a3ddc4aea6e9bedecdd23766c975f87364 100644 (file)
@@ -18,6 +18,8 @@
  *                        SPEEDSTEP - DEFINITIONS                    *
  *********************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -68,13 +70,13 @@ static int speedstep_find_register(void)
        /* get PMBASE */
        pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
        if (!(pmbase & 0x01)) {
-               printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+               pr_err("could not find speedstep register\n");
                return -ENODEV;
        }
 
        pmbase &= 0xFFFFFFFE;
        if (!pmbase) {
-               printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+               pr_err("could not find speedstep register\n");
                return -ENODEV;
        }
 
@@ -136,7 +138,7 @@ static void speedstep_set_state(unsigned int state)
                pr_debug("change to %u MHz succeeded\n",
                        speedstep_get_frequency(speedstep_processor) / 1000);
        else
-               printk(KERN_ERR "cpufreq: change failed - I/O error\n");
+               pr_err("change failed - I/O error\n");
 
        return;
 }
index 15d3214aaa00b466018836e53f83c02fa90d2895..1b8062182c813b359200070306a228d0c0cef18b 100644 (file)
@@ -8,6 +8,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -153,7 +155,7 @@ static unsigned int pentium_core_get_frequency(void)
                fsb = 333333;
                break;
        default:
-               printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+               pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
        }
 
        rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -453,11 +455,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
                 */
                if (*transition_latency > 10000000 ||
                    *transition_latency < 50000) {
-                       printk(KERN_WARNING PFX "frequency transition "
-                                       "measured seems out of range (%u "
-                                       "nSec), falling back to a safe one of"
-                                       "%u nSec.\n",
-                                       *transition_latency, 500000);
+                       pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
+                               *transition_latency, 500000);
                        *transition_latency = 500000;
                }
        }
index 819229e824fb69dde06d44d8726d95b5697d29da..770a9ae1999a96a7fa02f747662a56b173532e5f 100644 (file)
@@ -12,6 +12,8 @@
  *                        SPEEDSTEP - DEFINITIONS                    *
  *********************************************************************/
 
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -204,9 +206,8 @@ static void speedstep_set_state(unsigned int state)
                        (speedstep_freqs[new_state].frequency / 1000),
                        retry, result);
        else
-               printk(KERN_ERR "cpufreq: change to state %u "
-                       "failed with new_state %u and result %u\n",
-                       state, new_state, result);
+               pr_err("change to state %u failed with new_state %u and result %u\n",
+                      state, new_state, result);
 
        return;
 }
index a9c659f589747a2140f2b91e0c07cd7fe1cbcb69..04042038ec4b75e78631eda625f0f1bb2a4b4d95 100644 (file)
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
 {
        int ret;
 
+       if (!of_machine_is_compatible("st,stih407") &&
+           !of_machine_is_compatible("st,stih410"))
+               return -ENODEV;
+
        ddata.cpu = get_cpu_device(0);
        if (!ddata.cpu) {
                dev_err(ddata.cpu, "Failed to get device for CPU0\n");
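
Without this guard the initcall would run, and fail noisily, on every machine whose kernel builds this driver in. Returning -ENODEV when the root node's compatible string does not match is the usual pattern for platform-specific initcalls; a sketch with a hypothetical compatible string and driver:

    static int __init example_cpufreq_init(void)
    {
            /* "vendor,soc" is illustrative, not a real binding */
            if (!of_machine_is_compatible("vendor,soc"))
                    return -ENODEV;

            return platform_driver_register(&example_driver);
    }
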
index 20bcceb58ccc04f4963c1a2686fb0c18ee6fcc5b..43530254201a8b3a5f98fdcb032ea6c3b635bb96 100644 (file)
@@ -14,7 +14,6 @@
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -69,10 +68,6 @@ static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
        clk_set_parent(priv->cpu_clk, priv->pllx_clk);
 }
 
-static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
-       .independent_clocks = false,
-};
-
 static int tegra124_cpufreq_probe(struct platform_device *pdev)
 {
        struct tegra124_cpufreq_priv *priv;
@@ -129,8 +124,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
 
        cpufreq_dt_devinfo.name = "cpufreq-dt";
        cpufreq_dt_devinfo.parent = &pdev->dev;
-       cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
-       cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
 
        priv->cpufreq_dt_pdev =
                platform_device_register_full(&cpufreq_dt_devinfo);
index 433e93fd4900a7bccd474b04bf15889ff831f4bc..87e5bdc5ec74cb4e03be68ca4e7134e297bbfaad 100644 (file)
@@ -18,6 +18,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -26,8 +27,9 @@
 
 #include "arm_big_little.h"
 
-static int ve_spc_init_opp_table(struct device *cpu_dev)
+static int ve_spc_init_opp_table(const struct cpumask *cpumask)
 {
+       struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+
        /*
         * platform specific SPC code must initialise the opp table
         * so just check if the OPP count is non-zero
index 545069d5fdfba3e7ab07f0ec296404c64ad2a977..e342565e8715e95af6c76a80ca393d8fcfada66a 100644 (file)
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
                 * call the CPU ops suspend protocol with idle index as a
                 * parameter.
                 */
-               arm_cpuidle_suspend(idx);
+               ret = arm_cpuidle_suspend(idx);
 
                cpu_pm_exit();
        }
index f996efc56605a3d4739ae0bcac83099fd65b0751..2b8e6ce62e816eb3887b983d47a8a2a4c96929e1 100644 (file)
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-       ktime_t time_start, time_end;
+       u64 time_start, time_end;
        s64 diff;
 
        /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        sched_idle_set_state(target_state);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
-       time_start = ktime_get();
+       time_start = local_clock();
 
        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();
 
-       time_end = ktime_get();
+       time_end = local_clock();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
@@ -217,7 +217,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (!cpuidle_state_is_coupled(drv, entered_state))
                local_irq_enable();
 
-       diff = ktime_to_us(ktime_sub(time_end, time_start));
+       /*
+        * local_clock() returns the time in nanoseconds; shift
+        * by 10 (divide by 1024) to get microsecond-based time.
+        */
+       diff = (time_end - time_start) >> 10;
        if (diff > INT_MAX)
                diff = INT_MAX;
 
@@ -433,6 +437,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);
+
+       dev->registered = 0;
 }
 
 static void __cpuidle_device_init(struct cpuidle_device *dev)
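
Switching from ktime_get() to local_clock() trades the heavier timekeeping path for a lockless per-CPU clock in the idle hot path, and the later division by 1000 is replaced by a shift of 10 (divide by 1024), so the measured residency reads a few percent low in exchange for avoiding a 64-bit division. A worked example of the approximation:

    /* sketch: the ns -> us approximation used above */
    u64 ns = 1000000;            /* 1 ms of idle time, in nanoseconds */
    s64 exact  = ns / 1000;      /* 1000 us */
    s64 approx = ns >> 10;       /* 976 us: ~2.4% low, but division-free */
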
index b5d05807e6ecd509058fcb85a3f4810ce17a8adc..fa4ea22ca12e5512940114262241f5338f838e16 100644 (file)
@@ -355,7 +355,7 @@ int psci_cpu_suspend_enter(unsigned long index)
 
 /* ARM specific CPU idle operations */
 #ifdef CONFIG_ARM
-static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
        .suspend = psci_cpu_suspend_enter,
        .init = psci_dt_cpu_init_idle,
 };
index 815c4a5cae543e228a634eae28241bc6f24ac1ed..1b95475b6aefefd5144bca29ab464304826c6377 100644 (file)
@@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
 static inline void fw_cfg_read_blob(u16 key,
                                    void *buf, loff_t pos, size_t count)
 {
-       u32 glk;
+       u32 glk = -1U;
        acpi_status status;
 
        /* If we have ACPI, ensure mutual exclusion against any potential
index d9ab0cd1d205963528d7022d66213c26bf8304e5..4d9a315cfd43ea8fb9d97b0578f9c6bd4c38ef00 100644 (file)
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 {
        struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       error = pinctrl_request_gpio(chip->base + offset);
-       if (error)
-               pm_runtime_put(&p->pdev->dev);
-
-       return error;
+       return pinctrl_request_gpio(chip->base + offset);
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
        pinctrl_free_gpio(chip->base + offset);
 
-       /* Set the GPIO as an input to ensure that the next GPIO request won't
+       /*
+        * Set the GPIO as an input to ensure that the next GPIO request won't
         * drive the GPIO pin as an output.
         */
        gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
-       pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
 
        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        irq_chip->irq_unmask = gpio_rcar_irq_enable;
        irq_chip->irq_set_type = gpio_rcar_irq_set_type;
        irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
-       irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
-       irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
-       irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
-       irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
        irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
 
        ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 err1:
        gpiochip_remove(gpio_chip);
 err0:
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
        return ret;
 }
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
 
        gpiochip_remove(&p->gpio_chip);
 
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
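
Instead of bracketing every GPIO request and IRQ operation with pm_runtime_get_sync()/pm_runtime_put(), the driver now takes a single runtime PM reference in probe and drops it in remove, keeping the block powered for the device's whole lifetime. A sketch of the simplified lifetime, assuming <linux/pm_runtime.h> and a platform device:

    static int example_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);
            pm_runtime_get_sync(&pdev->dev);   /* powered from here on */
            /* ... register the gpiochip, request the IRQ ... */
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            /* ... gpiochip_remove() ... */
            pm_runtime_put(&pdev->dev);
            pm_runtime_disable(&pdev->dev);
            return 0;
    }
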
index 682070d20f001dce1aef51147d99aca97cb2938c..2dc52585e3f2f1cc79664beba50ef453dcea1950 100644 (file)
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
                lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
                if (lookup) {
                        lookup->adev = adev;
-                       lookup->con_id = con_id;
+                       lookup->con_id = kstrdup(con_id, GFP_KERNEL);
                        list_add_tail(&lookup->node, &acpi_crs_lookup_list);
                }
        }
index e557fc1f17c8becfb3ddae209942326d05d5a884..7ecea83ce453cae868306b95b1600a01653a6de7 100644 (file)
@@ -541,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
+                       bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
index 1e0bba29e16796f97c8eee38fc9d3100c405f6bc..1cd6de575305aa3f657fa1a7f5a70232b272d739 100644 (file)
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
                amdgpu_panel_mode_fixup(encoder, adjusted_mode);
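
The vertical front porch is crtc_vsync_start - crtc_vdisplay, and it must be at least one line for the display hardware to program the mode. Worked example: a 1920x1080 mode with crtc_vdisplay = 1080 and crtc_vsync_start = 1080 has a front porch of 0, so the fixup bumps crtc_vsync_start to 1081. The identical check is added to the radeon encoder further below.
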
index 30798cbc6fc08938cc2adc62089148d545c118a6..6d2fb3f4ac628fddaaa5f6da8f724c3787330c5b 100644 (file)
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       int ret;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't anyway depend on them
+        * disabling/enabling the device.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
index f76cbf3e5d1e1999afa61b62bf9740c308e552bb..fffdac801d3b0da03abdd65b2c90e50fb1913e54 100644 (file)
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)   (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)   roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
 #define INTERVAL_0_833_US(us)  (((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
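
Worked example of the rounded macro: for us = 10000, (10000 * 100) >> 7 = 7812 units of 1.28 us, and roundup(7812, 25) = 7825, the next multiple of 25. The old macro would have programmed 7812 directly; per the comment, keeping the RPS EI/threshold values on multiples of 25 is what stops the affected SNB machines from hanging.

    /* 10 ms expressed in 1.28 us units, rounded up to a multiple of 25 */
    /* INTERVAL_1_28_US(10000) == roundup(7812, 25) == 7825 */
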
index 62de9f4bce09959a8deb756c0e51ecb4e202315a..3b57bf06abe8598c1c3b6fba0dc8e19b7f192619 100644 (file)
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-               ddi_translations_edp = bdw_ddi_translations_edp;
+
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations_edp = bdw_ddi_translations_edp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+               } else {
+                       ddi_translations_edp = bdw_ddi_translations_dp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+               }
+
                ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-               n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
                n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
                hdmi_default_entry = 7;
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-       /* HDMI has nothing special to destroy, so we can go with this. */
-       intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-       .destroy = intel_ddi_destroy,
+       .reset = intel_dp_encoder_reset,
+       .destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->post_disable = intel_ddi_post_disable;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
 
        intel_dig_port->port = port;
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
index 6e0d8283daa66d6383516c28542850d5c7364a48..182f84937345d5b3d778d9521ca17f4b04608037 100644 (file)
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (state->legacy_cursor_update)
+                       continue;
+
                ret = intel_crtc_wait_for_pending_flips(crtc);
                if (ret)
                        return ret;
index f069a82deb57a42a814e2e5a30f5f1b1e4c51ad8..412a34c39522fc8474b87eaa02a5532f8a32d859 100644 (file)
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
        edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp;
 
index 4c027d69fac92c99d6a67b6a98f98d9f67d84e60..7d3af3a72abea7ac557f5f107aaf597bc38d5165 100644 (file)
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
index a0d8daed24701cf279ec6a452538eb814e092022..1ab6f687f6408ff9d72956fec2ebee33640c1755 100644 (file)
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                                hdmi_to_dig_port(intel_hdmi));
        }
 
-       if (!live_status)
-               DRM_DEBUG_KMS("Live status not up!");
+       if (!live_status) {
+               DRM_DEBUG_KMS("HDMI live status down\n");
+               /*
+                * Live status register is not reliable on all intel platforms.
+                * So consider live_status only for certain platforms, for
+                * others, read EDID to determine presence of sink.
+                */
+               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+                       live_status = true;
+       }
 
        intel_hdmi_unset_edid(connector);
 
index edd05cdb0cd843dfe9ee53ecbac7f4a8908d3dd2..587cae4e73c9abeeb65d2187b9e700bb8f0fa442 100644 (file)
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
                radeon_panel_mode_fixup(encoder, adjusted_mode);
index e00db3f510dd425c62565d913c937c5638a072d5..abb98c77bad25c6901b910095068a3aec5059311 100644 (file)
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        goto err_register;
                }
 
-               pdev->dev.of_node = of_node;
                pdev->dev.parent = dev;
 
                ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        platform_device_put(pdev);
                        goto err_register;
                }
+
+               /*
+                * Set of_node only after calling platform_device_add. Otherwise
+                * the platform:imx-ipuv3-crtc modalias won't be used.
+                */
+               pdev->dev.of_node = of_node;
        }
 
        return 0;
index c6eaff5f88451f770cf036e71b9167a0202351e7..0238f0169e48f5d45b345498c5ba713c8cb9a96d 100644 (file)
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
 
 #define USB_VENDOR_ID_CREATIVELABS     0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51     0x322c
 #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
 
 #define USB_VENDOR_ID_CVTOUCH          0x1ff7
index ed2f68edc8f1c648e30b15178fc550cb9a071f88..53fc856d6867b032e6e2100c1c87c3bf5733702c 100644 (file)
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
index 02c4efea241c02eb6270d19b40cc90256b04721a..cf2ba43453fd4ecd9458075a9767676534a354fe 100644 (file)
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 
                wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
 
+               wacom->shared->stylus_in_proximity = true;
                return 1;
        }
 
@@ -3395,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
        { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
          INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+       { "Wacom DTK1651", 34616, 19559, 1023, 0,
+         DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+         WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC };
@@ -3560,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x33C) },
        { USB_DEVICE_WACOM(0x33D) },
        { USB_DEVICE_WACOM(0x33E) },
+       { USB_DEVICE_WACOM(0x343) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 5613e2b5cff7759861a1e2d7ad2748f7626d16da..a40a73a7b71da359574bf4061d80340b1caa30a4 100644 (file)
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-                                     struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
        u32 cur_write_sz;
        u32 r_size;
-       u32 write_loc = rbi->ring_buffer->write_index;
+       u32 write_loc;
        u32 read_loc = rbi->ring_buffer->read_index;
-       u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+       u32 pending_sz;
 
+       /*
+        * Issue a full memory barrier before making the signaling decision.
+        * Here is the reason for having this barrier:
+        * If the read of pending_sz (in this function)
+        * were to be reordered and happen before we commit the new read
+        * index (in the calling function), we could
+        * have a problem. If the host were to set the pending_sz after we
+        * have sampled pending_sz and go to sleep before we commit the
+        * read index, we could miss sending the interrupt. Issue a full
+        * memory barrier to address this.
+        */
+       mb();
+
+       pending_sz = rbi->ring_buffer->pending_send_sz;
+       write_loc = rbi->ring_buffer->write_index;
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;
 
-       if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+       if (cur_write_sz >= pending_sz)
                return true;
 
        return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);
 
-       *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+       *signal = hv_need_to_signal_on_read(inring_info);
 
        return ret;
 }
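
The hazard being closed: the caller commits the new read index, and only afterwards may this function sample pending_send_sz. If the CPU or compiler hoisted that load above the index store, the host could set pending_send_sz in the window and the guest would sleep without signalling. A sketch of the required reader-side ordering; bytes_avail_to_write() and signal_host() are hypothetical helpers:

    ring->read_index = next_read;       /* commit the read index ...        */
    mb();                               /* ... make it globally visible ... */
    pending = ring->pending_send_sz;    /* ... before sampling this field   */
    if (pending && bytes_avail_to_write(ring) >= pending)
            signal_host();
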
index c6935de425fa22ed2b0d08e7d55b5ae02ccde0c6..c96649292b556f18647f1dc2a3fa075cfe176ae8 100644 (file)
@@ -766,6 +766,67 @@ static struct cpuidle_state knl_cstates[] = {
                .enter = NULL }
 };
 
+static struct cpuidle_state bxt_cstates[] = {
+       {
+               .name = "C1-BXT",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00),
+               .exit_latency = 2,
+               .target_residency = 2,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C1E-BXT",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01),
+               .exit_latency = 10,
+               .target_residency = 20,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C6-BXT",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 133,
+               .target_residency = 133,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C7s-BXT",
+               .desc = "MWAIT 0x31",
+               .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 155,
+               .target_residency = 155,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C8-BXT",
+               .desc = "MWAIT 0x40",
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 1000,
+               .target_residency = 1000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C9-BXT",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 2000,
+               .target_residency = 2000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C10-BXT",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 10000,
+               .target_residency = 10000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .enter = NULL }
+};
+
 /**
  * intel_idle
  * @dev: cpuidle_device
@@ -950,6 +1011,11 @@ static const struct idle_cpu idle_cpu_knl = {
        .state_table = knl_cstates,
 };
 
+static const struct idle_cpu idle_cpu_bxt = {
+       .state_table = bxt_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -985,6 +1051,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        ICPU(0x9e, idle_cpu_skl),
        ICPU(0x55, idle_cpu_skx),
        ICPU(0x57, idle_cpu_knl),
+       ICPU(0x5c, idle_cpu_bxt),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -1075,6 +1142,73 @@ static void ivt_idle_state_table_update(void)
 
        /* else, 1 and 2 socket systems use default ivt_cstates */
 }
+
+/*
+ * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+ */
+
+static unsigned int irtl_ns_units[] = {
+       1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+static unsigned long long irtl_2_usec(unsigned long long irtl)
+{
+       unsigned long long ns;
+
+       ns = irtl_ns_units[(irtl >> 10) & 0x7];
+
+       return div64_u64((irtl & 0x3FF) * ns, 1000);
+}
+
+/*
+ * bxt_idle_state_table_update(void)
+ *
+ * On BXT, we trust the IRTL to show the definitive maximum latency.
+ * We use the same value for target_residency.
+ */
+static void bxt_idle_state_table_update(void)
+{
+       unsigned long long msr;
+
+       rdmsrl(MSR_PKGC6_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[2].exit_latency = usec;
+               bxt_cstates[2].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC7_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[3].exit_latency = usec;
+               bxt_cstates[3].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC8_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[4].exit_latency = usec;
+               bxt_cstates[4].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC9_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[5].exit_latency = usec;
+               bxt_cstates[5].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC10_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[6].exit_latency = usec;
+               bxt_cstates[6].target_residency = usec;
+       }
+}
+
 /*
  * sklh_idle_state_table_update(void)
  *
@@ -1130,6 +1264,9 @@ static void intel_idle_state_table_update(void)
        case 0x3e: /* IVT */
                ivt_idle_state_table_update();
                break;
+       case 0x5c: /* BXT */
+               bxt_idle_state_table_update();
+               break;
        case 0x5e: /* SKL-H */
                sklh_idle_state_table_update();
                break;
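
Worked example of the IRTL decoding, using a hypothetical MSR value: for irtl = (2 << 10) | 100, the unit field selects irtl_ns_units[2] = 1024 ns and the time value is 100, so irtl_2_usec() returns div64_u64(100 * 1024, 1000) = 102 us, which then overwrites both exit_latency and target_residency of the matching C-state.
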
index dbee13ad33a3c341777beb60715ec1f7007fd767..2e154cb5168567ce3f09ef095bb65a639802d7e4 100644 (file)
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
        if (ret)
                goto vref_disable;
 
+       platform_set_drvdata(pdev, indio_dev);
+
        ret = iio_device_register(indio_dev);
        if (ret < 0)
                goto per_clk_disable_unprepare;
index f581256d9d4c2473eca8979b3c0075c308f844a8..5ee4e0dc093e687445cfb3daab47f8a7f0e182fe 100644 (file)
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
        return 0;
 }
 
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+       const struct acpi_device_id *id;
+
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+       if (!id)
+               return NULL;
+
+       *chip_id = (int)id->driver_data;
+
+       return dev_name(dev);
+}
+
 /**
  *  inv_mpu_probe() - probe function.
  *  @client:          i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct inv_mpu6050_state *st;
-       int result;
-       const char *name = id ? id->name : NULL;
+       int result, chip_type;
        struct regmap *regmap;
+       const char *name;
 
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_I2C_BLOCK))
                return -EOPNOTSUPP;
 
+       if (id) {
+               chip_type = (int)id->driver_data;
+               name = id->name;
+       } else if (ACPI_HANDLE(&client->dev)) {
+               name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+               if (!name)
+                       return -ENODEV;
+       } else {
+               return -ENOSYS;
+       }
+
        regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
        }
 
        result = inv_mpu_core_probe(regmap, client->irq, name,
-                                   NULL, id->driver_data);
+                                   NULL, chip_type);
        if (result < 0)
                return result;
 
index dea6c4361de013c38441100dc26078fd13393a89..7bcb8d839f0549a1a89c6291a78796841cf1db5c 100644 (file)
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        struct regmap *regmap;
        const struct spi_device_id *id = spi_get_device_id(spi);
        const char *name = id ? id->name : NULL;
+       const int chip_type = id ? id->driver_data : 0;
 
        regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        }
 
        return inv_mpu_core_probe(regmap, spi->irq, name,
-                                 inv_mpu_i2c_disable, id->driver_data);
+                                 inv_mpu_i2c_disable, chip_type);
 }
 
 static int inv_mpu_remove(struct spi_device *spi)
index 9c5c9ef3f1dad25f37e108aeb9752940f46aa393..0e931a9a1669424278a1daa935a919e32db32e35 100644 (file)
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
        int rc;
        int irq;
 
+       init_waitqueue_head(&data->data_ready_queue);
+       clear_bit(0, &data->flags);
        if (client->irq)
                irq = client->irq;
        else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
                return rc;
        }
 
-       init_waitqueue_head(&data->data_ready_queue);
-       clear_bit(0, &data->flags);
        data->eoc_irq = irq;
 
        return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
        int eoc_gpio;
        int err;
        const char *name = NULL;
-       enum asahi_compass_chipset chipset;
+       enum asahi_compass_chipset chipset = AK_MAX_TYPE;
 
        /* Grab and set up the supplied GPIO. */
        if (client->dev.platform_data)
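
Moving init_waitqueue_head() and the flag clear ahead of the IRQ request closes a classic setup race: the handler may fire the moment the interrupt is requested and must never observe an uninitialised waitqueue. The general shape, with hypothetical field and handler names:

    /* initialise everything the handler touches ... */
    init_waitqueue_head(&data->wq);
    clear_bit(0, &data->flags);

    /* ... only then make the handler runnable */
    rc = devm_request_irq(dev, irq, example_irq_handler,
                          IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                          "example", data);
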
index 80b6bedc172f32a1d9dfad1e36ee8aa3a4a16240..64b3d11dcf1ed588a95d149f2bbd9ff884562893 100644 (file)
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        struct Scsi_Host *shost;
        struct iser_conn *iser_conn = NULL;
        struct ib_conn *ib_conn;
+       u32 max_fr_sectors;
        u16 max_cmds;
 
        shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                iser_conn = ep->dd_data;
                max_cmds = iser_conn->max_cmds;
                shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
-               shost->max_sectors = iser_conn->scsi_max_sectors;
 
                mutex_lock(&iser_conn->state_mutex);
                if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                 */
                shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
                        ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-               shost->max_sectors = min_t(unsigned int,
-                       1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
 
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                        goto free_host;
        }
 
+       /*
+        * FRs or FMRs can only map up to a (device) page per entry, but if the
+        * first entry is misaligned we'll end up using two entries
+        * (head and tail) for a single page worth of data, so we have to drop
+        * one segment from the calculation.
+        */
+       max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+       shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
        .queuecommand           = iscsi_queuecommand,
        .change_queue_depth     = scsi_change_queue_depth,
        .sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
-       .max_sectors            = ISER_DEF_MAX_SECTORS,
        .cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler= iscsi_eh_device_reset,
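
The comment in the hunk above explains why one scatter-gather entry is reserved: a misaligned first entry can consume a head and a tail slot for a single page of data. A worked computation (a sketch with illustrative values, not taken from the patch) makes the resulting cap concrete:

    /* Assuming 4 KiB pages and sg_tablesize = 128:
     * (128 - 1) * 4096 / 512 = 1016 sectors, then capped by the
     * iser_max_sectors module parameter. */
    static unsigned int iser_cap_sectors(unsigned short sg_tablesize,
                                         unsigned int iser_max_sectors)
    {
            unsigned int max_fr_sectors = ((sg_tablesize - 1) * 4096) >> 9;

            return min(iser_max_sectors, max_fr_sectors);
    }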
index 53e33fab3f7afadef16e97a76271752d9d6d891c..df3581f606282a2890b8ce78cf13d9e4882c3c0a 100644 (file)
@@ -181,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
 {
        struct vibra_info *info = container_of(work,
                                struct vibra_info, play_work);
+       int ret;
+
+       /* Do not allow the effect while the routing is set to use audio */
+       ret = twl6040_get_vibralr_status(info->twl6040);
+       if (ret & TWL6040_VIBSEL) {
+               dev_info(info->dev, "Vibra is configured for audio\n");
+               return;
+       }
 
        mutex_lock(&info->mutex);
 
@@ -199,14 +207,6 @@ static int vibra_play(struct input_dev *input, void *data,
                      struct ff_effect *effect)
 {
        struct vibra_info *info = input_get_drvdata(input);
-       int ret;
-
-       /* Do not allow effect, while the routing is set to use audio */
-       ret = twl6040_get_vibralr_status(info->twl6040);
-       if (ret & TWL6040_VIBSEL) {
-               dev_info(&input->dev, "Vibra is configured for audio\n");
-               return -EBUSY;
-       }
 
        info->weak_speed = effect->u.rumble.weak_magnitude;
        info->strong_speed = effect->u.rumble.strong_magnitude;
index 2160512e861af57d289fb5873ef80b81c2fbf7c9..5af7907d0af4dd6ab79639ee91287cdf0bc90202 100644 (file)
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
        return 0;
 }
 
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+       int error;
+
+       enable_irq(data->irq);
+
+       error = mxt_process_messages_until_invalid(data);
+       if (error)
+               return error;
+
+       return 0;
+}
+
 static int mxt_soft_reset(struct mxt_data *data)
 {
        struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
        /* Ignore CHG line for 100ms after reset */
        msleep(100);
 
-       enable_irq(data->irq);
+       mxt_acquire_irq(data);
 
        ret = mxt_wait_for_completion(data, &data->reset_completion,
                                      MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
        return ret;
 }
 
-static int mxt_acquire_irq(struct mxt_data *data)
-{
-       int error;
-
-       enable_irq(data->irq);
-
-       error = mxt_process_messages_until_invalid(data);
-       if (error)
-               return error;
-
-       return 0;
-}
-
 static int mxt_get_info(struct mxt_data *data)
 {
        struct i2c_client *client = data->client;
index 9bbadaaf6bc3723f044a9c44e8619773d242ec8f..7b3845aa5983ad57892a0022990f3353bcca82b6 100644 (file)
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
                        point.coord_x = point.coord_y = 0;
                }
 
-               point.state = payload[9 * i + 5] & 0x03;
-               point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+               point.state = payload[9 * i + 5] & 0x0f;
+               point.id = (payload[9 * i + 5] & 0xf0) >> 4;
 
                /* determine touch major, minor and orientation */
                point.area_major = max(payload[9 * i + 6],
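
The touch byte layout changes here from a 2-bit state and 6-bit id to a 4-bit/4-bit split. A tiny decode helper (hypothetical, for illustration only) shows the new packing:

    /* bits 3:0 carry the contact state, bits 7:4 the contact id */
    static void zforce_decode_touch_byte(u8 b, u8 *state, u8 *id)
    {
            *state = b & 0x0f;
            *id = (b & 0xf0) >> 4;
    }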
index 194580fba7fd8b7a965ea87f9856cada61e15be6..14d3b37944df031214c2c6951ed15c46da104842 100644 (file)
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
+       /* the bio may become mergeable once passed to the underlying layer */
+       bio->bi_rw &= ~REQ_NOMERGE;
        mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
index 2ea12c6bf65997895e26a89fe8977451ae438d97..34783a3c8b3c1af780a092b8d11018f702541481 100644 (file)
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
-       printk(KERN_INFO "\n");
 }
 
 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned short blksize = 512;
 
+       *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
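
create_strip_zones() reports errors through its return value but hands the configuration back through *private_conf, so initializing the out-parameter to ERR_PTR(-ENOMEM) up front guarantees every early exit leaves something the caller can test with IS_ERR() rather than stale stack contents. A minimal sketch of the pattern (names illustrative):

    static int make_conf(struct r0conf **out)
    {
            struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

            *out = ERR_PTR(-ENOMEM);    /* sane even on an early return */
            if (!conf)
                    return -ENOMEM;
            /* ... populate conf; later failures set *out accordingly ... */
            *out = conf;
            return 0;
    }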
index 8ab8b65e17413e4a015aab5e073728fd23e21b47..e48c262ce0322fe504d92d14403c70cb3ef4a608 100644 (file)
@@ -3502,8 +3502,6 @@ returnbi:
                                dev = &sh->dev[i];
                        } else if (test_bit(R5_Discard, &dev->flags))
                                discard_pending = 1;
-                       WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
-                       WARN_ON(dev->page != dev->orig_page);
                }
 
        r5l_stripe_write_finished(sh);
index 6e43c95629ea8e8fc550b6fc63bf56b5c627b1ac..3cfd7af8c5cab06607a94c716951daa6166b60e2 100644 (file)
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(media_device_find_devres);
 
+#if IS_ENABLED(CONFIG_PCI)
 void media_device_pci_init(struct media_device *mdev,
                           struct pci_dev *pci_dev,
                           const char *name)
 {
-#ifdef CONFIG_PCI
        mdev->dev = &pci_dev->dev;
 
        if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
 
+#if IS_ENABLED(CONFIG_USB)
 void __media_device_usb_init(struct media_device *mdev,
                             struct usb_device *udev,
                             const char *board_name,
                             const char *driver_name)
 {
-#ifdef CONFIG_USB
        mdev->dev = &udev->dev;
 
        if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
 
 
 #endif /* CONFIG_MEDIA_CONTROLLER */
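
The guard moves from inside the function bodies to around the whole definitions, and #ifdef becomes IS_ENABLED(), which is true for both built-in (=y) and modular (=m) configurations; with the old form a kernel without PCI or USB still compiled and exported empty stubs. The idiom, as a sketch with hypothetical names:

    #if IS_ENABLED(CONFIG_FOO)          /* CONFIG_FOO=y or CONFIG_FOO=m */
    void my_foo_init(struct my_dev *dev)
    {
            /* real body; neither built nor exported when FOO is off */
    }
    EXPORT_SYMBOL_GPL(my_foo_init);
    #endif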
index feb521f28e14857b8344d7320769c87183cd17ac..4f494acd8150fd85f8aaedfb3572b85bb22b460d 100644 (file)
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, fmd);
 
-       /* Protect the media graph while we're registering entities */
-       mutex_lock(&fmd->media_dev.graph_mutex);
-
        ret = fimc_md_register_platform_entities(fmd, dev->of_node);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_clk;
-       }
 
        ret = fimc_md_register_sensor_entities(fmd);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_m_ent;
-       }
-
-       mutex_unlock(&fmd->media_dev.graph_mutex);
 
        ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
        if (ret)
index 0b44b9accf50794172ddb2249414883a1b125b43..af237af204e2760fc0efcaac6ac2dc31207ef662 100644 (file)
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_sens;
 
-       mutex_lock(&camif->media_dev.graph_mutex);
-
        ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_register_video_nodes(camif);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_create_media_links(camif);
        if (ret < 0)
-               goto err_unlock;
-
-       mutex_unlock(&camif->media_dev.graph_mutex);
+               goto err_sens;
 
        ret = media_device_register(&camif->media_dev);
        if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
        pm_runtime_put(dev);
        return 0;
 
-err_unlock:
-       mutex_unlock(&camif->media_dev.graph_mutex);
 err_sens:
        v4l2_device_unregister(&camif->v4l2_dev);
        media_device_unregister(&camif->media_dev);
index e94c7fb6712aab35a1a3278241c3bab494f68777..88e45234d527518e0d449d598c5f04a5be5538a1 100644 (file)
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                        ret = -EFAULT;
                        goto free_ret;
                }
+               /* Ensure desc has not changed between the two reads */
+               if (memcmp(&dd, dd_config, sizeof(dd))) {
+                       ret = -EINVAL;
+                       goto free_ret;
+               }
                mutex_lock(&vdev->vdev_mutex);
                mutex_lock(&vi->vop_mutex);
                ret = vop_virtio_add_device(vdev, dd_config);
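
The added memcmp() closes a double-fetch (TOCTOU) window: the descriptor header is copied once to size a buffer and then copied again in full, and a hostile userspace could rewrite it between the two copy_from_user() calls. The general shape of the guard, as a self-contained sketch with illustrative types:

    struct hdr { u32 len; };

    static long checked_fetch(void __user *uptr, void **out)
    {
            struct hdr h;
            void *buf;

            if (copy_from_user(&h, uptr, sizeof(h)))
                    return -EFAULT;
            buf = kmalloc(sizeof(h) + h.len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            if (copy_from_user(buf, uptr, sizeof(h) + h.len) ||
                memcmp(&h, buf, sizeof(h))) {   /* changed between reads? */
                    kfree(buf);
                    return -EFAULT;
            }
            *out = buf;
            return 0;
    }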
index a2904029cccc2949b90d66cc8fb7d3fcbc11f103..5e572b3510b9483a6cf38a055ed473ab83776ab0 100644 (file)
@@ -2181,7 +2181,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                               struct net_device *bridge)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int i, err;
+       int i, err = 0;
 
        mutex_lock(&ps->smi_mutex);
 
index 12a009d720cde6d85fd660c798616aee82be0ba9..72eb29ed0359e5fcac6c962ea46386aa5b050c88 100644 (file)
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
+       unsigned int offset = 0;
 
-       page = alloc_page(gfp);
-       if (!page)
-               return -ENOMEM;
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = rxr->rx_page;
+               if (!page) {
+                       page = alloc_page(gfp);
+                       if (!page)
+                               return -ENOMEM;
+                       rxr->rx_page = page;
+                       rxr->rx_page_offset = 0;
+               }
+               offset = rxr->rx_page_offset;
+               rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+               if (rxr->rx_page_offset == PAGE_SIZE)
+                       rxr->rx_page = NULL;
+               else
+                       get_page(page);
+       } else {
+               page = alloc_page(gfp);
+               if (!page)
+                       return -ENOMEM;
+       }
 
-       mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+       mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 
        rx_agg_buf->page = page;
+       rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
+               prod_rx_buf->offset = cons_rx_buf->offset;
 
                prod_rx_buf->mapping = cons_rx_buf->mapping;
 
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
                cons_rx_buf = &rxr->rx_agg_ring[cons];
-               skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+               skb_fill_page_desc(skb, i, cons_rx_buf->page,
+                                  cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);
 
                /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                        return NULL;
                }
 
-               dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+               dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
 
                skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                        dma_unmap_page(&pdev->dev,
                                       dma_unmap_addr(rx_agg_buf, mapping),
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
                        rx_agg_buf->page = NULL;
                        __clear_bit(j, rxr->rx_agg_bmap);
 
                        __free_page(page);
                }
+               if (rxr->rx_page) {
+                       __free_page(rxr->rx_page);
+                       rxr->rx_page = NULL;
+               }
        }
 }
 
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
                return 0;
 
-       type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+       type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
                RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
        bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
        bp->rx_agg_nr_pages = 0;
 
        if (bp->flags & BNXT_FLAG_TPA)
-               agg_factor = 4;
+               agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
        bp->flags &= ~BNXT_FLAG_JUMBO;
        if (rx_space > PAGE_SIZE) {
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
                /* Number of segs is in log2 units, and the first packet
                 * is not included as part of these units.
                 */
-               if (mss <= PAGE_SIZE) {
-                       n = PAGE_SIZE / mss;
+               if (mss <= BNXT_RX_PAGE_SIZE) {
+                       n = BNXT_RX_PAGE_SIZE / mss;
                        nsegs = (MAX_SKB_FRAGS - 1) * n;
                } else {
-                       n = mss / PAGE_SIZE;
-                       if (mss & (PAGE_SIZE - 1))
+                       n = mss / BNXT_RX_PAGE_SIZE;
+                       if (mss & (BNXT_RX_PAGE_SIZE - 1))
                                n++;
                        nsegs = (MAX_SKB_FRAGS - n) / n;
                }
@@ -4309,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
        if (bp->flags & BNXT_FLAG_MSIX_CAP)
                rc = bnxt_setup_msix(bp);
 
-       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
                /* fallback to INTA */
                rc = bnxt_setup_inta(bp);
        }
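
The core of this patch is sub-page RX buffering: aggregation descriptors carry a 16-bit length, so on systems with 64 KiB pages a whole page cannot be posted. Each page is instead carved into BNXT_RX_PAGE_SIZE slices, with an extra page reference taken per slice still outstanding. The allocator logic, condensed from the hunks above (error handling trimmed):

    static struct page *rx_slice_alloc(struct bnxt_rx_ring_info *rxr,
                                       unsigned int *offset, gfp_t gfp)
    {
            struct page *page = rxr->rx_page;

            if (!page) {
                    page = alloc_page(gfp);
                    if (!page)
                            return NULL;
                    rxr->rx_page = page;
                    rxr->rx_page_offset = 0;
            }
            *offset = rxr->rx_page_offset;
            rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
            if (rxr->rx_page_offset == PAGE_SIZE)
                    rxr->rx_page = NULL;    /* page fully handed out */
            else
                    get_page(page);         /* one ref per live slice */
            return page;
    }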
index 709b95b8fcbad5742399607c9ec98a504ac05754..8b823ff558ffe2ab81422c0ce6bdfa61d3bc2b20 100644 (file)
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
 
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
 #define BNXT_MIN_PKT_SIZE      45
 
 #define BNXT_NUM_TESTS(bp)     0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
 
 struct bnxt_sw_rx_agg_bd {
        struct page             *page;
+       unsigned int            offset;
        dma_addr_t              mapping;
 };
 
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
        unsigned long           *rx_agg_bmap;
        u16                     rx_agg_bmap_size;
 
+       struct page             *rx_page;
+       unsigned int            rx_page_offset;
+
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
index 48a7d7dee8461117b3f47bcbdebe4c0745385bf7..a63551d0a18a4b63a5ea385a46fabef39b0d36a8 100644 (file)
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->dev->dev;
+       bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
                                struct phy_device *phydev;
 
                                phydev = mdiobus_scan(bp->mii_bus, i);
-                               if (IS_ERR(phydev)) {
+                               if (IS_ERR(phydev) &&
+                                   PTR_ERR(phydev) != -ENODEV) {
                                        err = PTR_ERR(phydev);
                                        break;
                                }
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
        if (err)
                goto err_out_free_netdev;
 
+       err = macb_mii_init(bp);
+       if (err)
+               goto err_out_free_netdev;
+
+       phydev = bp->phy_dev;
+
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_unregister_mdio;
        }
 
-       err = macb_mii_init(bp);
-       if (err)
-               goto err_out_unregister_netdev;
-
-       netif_carrier_off(dev);
+       phy_attached_info(phydev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);
 
-       phydev = bp->phy_dev;
-       phy_attached_info(phydev);
-
        return 0;
 
-err_out_unregister_netdev:
-       unregister_netdev(dev);
+err_out_unregister_mdio:
+       phy_disconnect(bp->phy_dev);
+       mdiobus_unregister(bp->mii_bus);
+       mdiobus_free(bp->mii_bus);
+
+       /* Shutdown the PHY if there is a GPIO reset */
+       if (bp->reset_gpio)
+               gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
        free_netdev(dev);
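
The reordering follows the usual probe rule: register_netdev() publishes the interface, so the MDIO bus and PHY it depends on must exist first, and the error path must unwind in strict reverse order. A condensed sketch of the resulting shape:

    static int probe_order_sketch(struct net_device *dev, struct macb *bp)
    {
            int err;

            err = macb_mii_init(bp);        /* bring up MDIO/PHY first */
            if (err)
                    goto err_free;

            err = register_netdev(dev);     /* publish the device last */
            if (err)
                    goto err_mdio;          /* unwind in reverse order */
            return 0;

    err_mdio:
            mdiobus_unregister(bp->mii_bus);
            mdiobus_free(bp->mii_bus);
    err_free:
            free_netdev(dev);
            return err;
    }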
index 60908eab3b3adf805d1b6cef462bd24ec5f64fe6..43da891fab97e7f16b572dd67f3fc6aa9020a1fb 100644 (file)
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
-       u16 rspq_map[RSS_TABLE_SIZE];
+       u16 rspq_map[RSS_TABLE_SIZE + 1];
 
        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
+       rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
 
        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
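
The extra array slot holds a 0xffff sentinel, which suggests t3_config_rss() walks rspq_map until it sees that terminator; sized at exactly RSS_TABLE_SIZE, the walk would read one entry past the end. Assumed consumer shape (program_entry() is hypothetical):

    static void apply_rspq_map(const u16 *map)
    {
            while (*map != 0xffff)
                    program_entry(*map++);
    }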
index 7fc490225da507de93c66c1b55eba3f0e5295e58..a6d26d351dfc47c777b39c04a44c2f17bca0feab 100644 (file)
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Enable per-CPU interrupts on the CPU that is
                 * brought up.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_enable,
-                                        pp, true);
+               mvneta_percpu_enable(pp);
 
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Disable per-CPU interrupts on the CPU that is
                 * brought down.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_disable,
-                                        pp, true);
+               mvneta_percpu_disable(pp);
 
                break;
        case CPU_DEAD:
index 7ace07dad6a31d4b18ab5aa1e5335c0727cff596..c442f6ad15fff806af5f747e70d085766dd37ae9 100644 (file)
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
                return 0;
 
        pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+       if (IS_ERR(pep->phy))
+               return PTR_ERR(pep->phy);
        if (!pep->phy)
                return -ENODEV;
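
mdiobus_scan() distinguishes "nothing at this address" (NULL) from a genuine bus error (ERR_PTR), so during this API transition callers need both checks; the macb hunk above additionally tolerates -ENODEV. A sketch of the dual check:

    static int scan_one_phy(struct mii_bus *bus, int addr)
    {
            struct phy_device *phydev = mdiobus_scan(bus, addr);

            if (IS_ERR(phydev))
                    return PTR_ERR(phydev); /* genuine bus error */
            if (!phydev)
                    return -ENODEV;         /* nothing at this address */
            return 0;
    }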
 
index 1cf722eba607d82e8e34ff498e1b154f8fc8cc5d..559d11a443bc4973aae435e611f45ee014cf41fb 100644 (file)
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
        bool "Mellanox Technologies ConnectX-4 Ethernet support"
        depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
        select PTP_1588_CLOCK
+       select VXLAN if MLX5_CORE=y
        default n
        ---help---
          Ethernet support in Mellanox Technologies ConnectX-4 NIC.
index e80ce94b5dcf0e139b629732da3658892ca8590d..3881dce0cc30b6829c8a850ce9ff887024d665a9 100644 (file)
@@ -567,6 +567,7 @@ struct mlx5e_priv {
        struct mlx5e_vxlan_db      vxlan;
 
        struct mlx5e_params        params;
+       struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
index 67d548b70e142a376d0fc802a4576ee965f92520..d4dfc5ce516a41daf2f668aa3015c37ff4b73853 100644 (file)
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -1505,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
@@ -1961,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1976,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
@@ -2158,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2169,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2498,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2580,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                vxlan_get_rx_port(netdev);
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
@@ -2617,6 +2620,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
@@ -2630,9 +2636,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
+       flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mutex_lock(&priv->state_lock);
@@ -2655,6 +2661,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
 
        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
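
The driver switches from the shared system workqueue to a private single-threaded queue, so shutdown can drain exactly its own work; the removed flush_scheduled_work() flushes the global queue and can deadlock waiting on unrelated work items. Lifecycle of the private queue, sketched from the hunks above:

    static int wq_init_sketch(struct mlx5e_priv *priv)
    {
            priv->wq = create_singlethread_workqueue("mlx5e");
            if (!priv->wq)
                    return -ENOMEM;
            /* queue_work(priv->wq, ...) wherever schedule_work() was used */
            queue_work(priv->wq, &priv->set_rx_mode_work);
            return 0;
    }

    static void wq_fini_sketch(struct mlx5e_priv *priv)
    {
            flush_workqueue(priv->wq);      /* drain only this driver's work */
            cancel_delayed_work_sync(&priv->update_stats_work);
            destroy_workqueue(priv->wq);
    }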
index 8ba080e441a1863e63d4188820afbd2f1c39bd6e..5ff8af472bf5221e736e5369b4de73bfe996b0e9 100644 (file)
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-       iounmap(uar->map);
-       iounmap(uar->bf_map);
+       if (uar->map)
+               iounmap(uar->map);
+       else
+               iounmap(uar->bf_map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
index 9f10df25f3cd52d9f631fd1e0963483eb1e7073a..f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44 100644 (file)
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-       if (err)
-               return err;
+       if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+               goto free_work;
 
        vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-       if (!vxlan) {
-               err = -ENOMEM;
+       if (!vxlan)
                goto err_delete_port;
-       }
 
        vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
        if (err)
                goto err_free;
 
-       return 0;
+       goto free_work;
 
 err_free:
        kfree(vxlan);
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-       return err;
+free_work:
+       kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
        kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-       if (!mlx5e_vxlan_lookup_port(priv, port))
-               return;
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
+       u16 port = vxlan_work->port;
 
        __mlx5e_vxlan_core_del_port(priv, port);
+
+       kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add)
+{
+       struct mlx5e_vxlan_work *vxlan_work;
+
+       vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+       if (!vxlan_work)
+               return;
+
+       if (add)
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+       else
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+       vxlan_work->priv = priv;
+       vxlan_work->port = port;
+       vxlan_work->sa_family = sa_family;
+       queue_work(priv->wq, &vxlan_work->work);
 }
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
index a01685056ab156201c1945607e34bb7b53771ee3..129f3527aa147ca8cf02b7df2128b7cfeaadfecf 100644 (file)
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
        u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+       struct work_struct      work;
+       struct mlx5e_priv       *priv;
+       sa_family_t             sa_family;
+       u16                     port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
        return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 }
 
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add);
 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
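
The ndo VXLAN add/del callbacks can run in atomic context, so the sleeping firmware commands are deferred: a small work struct carrying the parameters is allocated with GFP_ATOMIC, queued on priv->wq, and freed by its own handler. The generic shape of such a self-freeing work item (names illustrative):

    struct port_work {
            struct work_struct work;
            u16 port;
    };

    static void port_work_fn(struct work_struct *work)
    {
            struct port_work *pw = container_of(work, struct port_work, work);

            /* ... sleeping firmware command using pw->port ... */
            kfree(pw);                      /* the work item frees itself */
    }

    static void queue_port_work(struct workqueue_struct *wq, u16 port)
    {
            struct port_work *pw = kmalloc(sizeof(*pw), GFP_ATOMIC);

            if (!pw)
                    return;                 /* best effort from atomic context */
            INIT_WORK(&pw->work, port_work_fn);
            pw->port = port;
            queue_work(wq, &pw->work);
    }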
 
index 270c9eeb7ab622955d237ae9cd96cadc5caa5f3a..6d1a956e3f779d57e83a8fd40a5136919231df2c 100644 (file)
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
 
        del_timer_sync(&mgp->watchdog_timer);
        mgp->running = MYRI10GE_ETH_STOPPING;
-       local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
        for (i = 0; i < mgp->num_slices; i++) {
                napi_disable(&mgp->ss[i].napi);
+               local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
                /* Lock the slice to prevent the busy_poll handler from
                 * accessing it.  Later when we bring the NIC up, myri10ge_open
                 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
                        pr_info("Slice %d locked\n", i);
                        mdelay(1);
                }
+               local_bh_enable();
        }
-       local_bh_enable();
        netif_carrier_off(dev);
 
        netif_tx_stop_all_queues(dev);
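
napi_disable() can sleep while it waits for an in-flight poll to finish, so it must not run with bottom halves disabled; the fix shrinks the local_bh_disable() window to cover only the busy-poll lock that actually requires it. Condensed per-slice teardown:

    static void stop_slices(struct myri10ge_slice_state *ss, int nslices)
    {
            int i;

            for (i = 0; i < nslices; i++) {
                    napi_disable(&ss[i].napi);  /* may sleep; BHs must be on */
                    local_bh_disable();         /* only the lock needs BHs off */
                    while (!myri10ge_ss_lock_napi(&ss[i]))
                            mdelay(1);
                    local_bh_enable();
            }
    }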
index 98d33d462c6ca0f6b78499cde0b8d3e2b51f8b07..1681084cc96f8270d36e55d9ede099a58632566a 100644 (file)
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
                return 0;
        }
 
+       if (nic_data->datapath_caps &
+           1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+               return -EOPNOTSUPP;
+
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
                       nic_data->vport_id);
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                                      bool replacing)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u32 flags = spec->flags;
 
        memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
 
+       /* Remove RSS flag if we don't have an RSS context. */
+       if (flags & EFX_FILTER_FLAG_RX_RSS &&
+           spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+           nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+               flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
        if (replacing) {
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
                               MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
                       0 : spec->dmaq_id);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
-                      (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+                      (flags & EFX_FILTER_FLAG_RX_RSS) ?
                       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
-       if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+       if (flags & EFX_FILTER_FLAG_RX_RSS)
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
                               spec->rss_context !=
                               EFX_FILTER_RSS_CONTEXT_DEFAULT ?
index bbb77cd8ad6776df47d01549cceb7165735c15f0..e2fcdf1eec441939af5e59dc2a581de2ebd02694 100644 (file)
@@ -367,7 +367,6 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct device_node              *phy_node;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct device                   *dev;
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-       if (priv->phy_node)
-               slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+       if (slave->data->phy_node) {
+               slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
-       else
+               if (!slave->phy) {
+                       dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+                               slave->data->phy_node->full_name,
+                               slave->slave_num);
+                       return;
+               }
+       } else {
                slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
-       if (IS_ERR(slave->phy)) {
-               dev_err(priv->dev, "phy %s not found on slave %d\n",
-                       slave->data->phy_id, slave->slave_num);
-               slave->phy = NULL;
-       } else {
-               phy_attached_info(slave->phy);
+               if (IS_ERR(slave->phy)) {
+                       dev_err(priv->dev,
+                               "phy \"%s\" not found on slave %d, err %ld\n",
+                               slave->data->phy_id, slave->slave_num,
+                               PTR_ERR(slave->phy));
+                       slave->phy = NULL;
+                       return;
+               }
+       }
 
-               phy_start(slave->phy);
+       phy_attached_info(slave->phy);
 
-               /* Configure GMII_SEL register */
-               cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
-                            slave->slave_num);
-       }
+       phy_start(slave->phy);
+
+       /* Configure GMII_SEL register */
+       cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
 }
 
 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
                         struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct device_node *slave_node;
-       struct cpsw_platform_data *data = &priv->data;
        int i = 0, ret;
        u32 prop;
 
@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                if (strcmp(slave_node->name, "slave"))
                        continue;
 
-               priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+               slave_data->phy_node = of_parse_phandle(slave_node,
+                                                       "phy-handle", 0);
                parp = of_get_property(slave_node, "phy_id", &lenp);
-               if (of_phy_is_fixed_link(slave_node)) {
-                       struct device_node *phy_node;
-                       struct phy_device *phy_dev;
-
+               if (slave_data->phy_node) {
+                       dev_dbg(&pdev->dev,
+                               "slave[%d] using phy-handle=\"%s\"\n",
+                               i, slave_data->phy_node->full_name);
+               } else if (of_phy_is_fixed_link(slave_node)) {
                        /* In the case of a fixed PHY, the DT node associated
                         * with the PHY is the Ethernet MAC DT node.
                         */
                        ret = of_phy_register_fixed_link(slave_node);
                        if (ret)
                                return ret;
-                       phy_node = of_node_get(slave_node);
-                       phy_dev = of_phy_find_device(phy_node);
-                       if (!phy_dev)
-                               return -ENODEV;
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, phy_dev->mdio.bus->id,
-                                phy_dev->mdio.addr);
+                       slave_data->phy_node = of_node_get(slave_node);
                } else if (parp) {
                        u32 phyid;
                        struct device_node *mdio_node;
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                        snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                                 PHY_ID_FMT, mdio->name, phyid);
                } else {
-                       dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+                       dev_err(&pdev->dev,
+                               "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+                               i);
                        goto no_phy_slave;
                }
                slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(priv, pdev)) {
+       if (cpsw_probe_dt(&priv->data, pdev)) {
                dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
index 442a7038e660c877b1cc7509efab7df214a25a87..e50afd1b2eda09d87a94b100646d774de52f32b5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/phy.h>
 
 struct cpsw_slave_data {
+       struct device_node *phy_node;
        char            phy_id[MII_BUS_ID_SIZE];
        int             phy_if;
        u8              mac_addr[ETH_ALEN];
index 58d58f002559f627040a8bfb3fa452f671c62e44..f56d66e6ec155194e1f432ee42950d9db915c697 100644 (file)
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 
        /* TODO: Add phy read and write and private statistics get feature */
 
-       return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       if (priv->phydev)
+               return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       else
+               return -EOPNOTSUPP;
 }
 
 static int match_first_device(struct device *dev, void *data)
index 13214a6492ac5b1eced4d39c21b7736f5dcf19d4..743b18266a7c2b2be2b33f2d3f7fea0f9b80e361 100644 (file)
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
                        continue;
 
                /* copy hw scan info */
-               memcpy(target->hwinfo, scan_info, scan_info->size);
+               memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
                target->essid_len = strnlen(scan_info->essid,
                                            sizeof(scan_info->essid));
                target->rate_len = 0;
index b3ffaee3085885e02a2556f5d34517320a875f1b..f279a897a5c7fe0e875fb8b058f4c00ae3059f62 100644 (file)
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
         * in the FIFO. In such cases, the FIFO enters an error mode it
         * cannot recover from by software.
         */
-       if (phydev->drv->phy_id == ATH8030_PHY_ID) {
-               if (phydev->state == PHY_NOLINK) {
-                       if (priv->gpiod_reset && !priv->phy_reset) {
-                               struct at803x_context context;
-
-                               at803x_context_save(phydev, &context);
-
-                               gpiod_set_value(priv->gpiod_reset, 1);
-                               msleep(1);
-                               gpiod_set_value(priv->gpiod_reset, 0);
-                               msleep(1);
-
-                               at803x_context_restore(phydev, &context);
-
-                               phydev_dbg(phydev, "%s(): phy was reset\n",
-                                          __func__);
-                               priv->phy_reset = true;
-                       }
-               } else {
-                       priv->phy_reset = false;
+       if (phydev->state == PHY_NOLINK) {
+               if (priv->gpiod_reset && !priv->phy_reset) {
+                       struct at803x_context context;
+
+                       at803x_context_save(phydev, &context);
+
+                       gpiod_set_value(priv->gpiod_reset, 1);
+                       msleep(1);
+                       gpiod_set_value(priv->gpiod_reset, 0);
+                       msleep(1);
+
+                       at803x_context_restore(phydev, &context);
+
+                       phydev_dbg(phydev, "%s(): phy was reset\n",
+                                  __func__);
+                       priv->phy_reset = true;
                }
+       } else {
+               priv->phy_reset = false;
        }
 }
 
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
index f20890ee03f33368fd68c6b5fb82f8fd76fa4310..f64778ad97535118d5b0705c1a5afa04ab57677c 100644 (file)
@@ -269,6 +269,7 @@ struct skb_data {           /* skb->cb is one of these */
        struct lan78xx_net *dev;
        enum skb_state state;
        size_t length;
+       int num_of_packet;
 };
 
 struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
 
 static void lan78xx_link_status_change(struct net_device *net)
 {
-       /* nothing to do */
+       struct phy_device *phydev = net->phydev;
+       int ret, temp;
+
+       /* In forced 100 F/H mode the chip may fail to set the mode
+        * correctly when the cable is switched between a long (~50+ m)
+        * and a short one. As a workaround, set the speed to 10 before
+        * setting it back to 100.
+        */
+       if (!phydev->autoneg && (phydev->speed == 100)) {
+               /* disable phy interrupt */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+               temp = phy_read(phydev, MII_BMCR);
+               temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+               phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+               temp |= BMCR_SPEED100;
+               phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+               /* clear any pending interrupt generated during the workaround */
+               temp = phy_read(phydev, LAN88XX_INT_STS);
+
+               /* enable phy interrupt back */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+       }
 }
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
        struct lan78xx_net *dev = entry->dev;
 
        if (urb->status == 0) {
-               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_packets += entry->num_of_packet;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
                return;
        }
 
-       skb->protocol = eth_type_trans(skb, dev->net);
        dev->net->stats.rx_packets++;
        dev->net->stats.rx_bytes += skb->len;
 
+       skb->protocol = eth_type_trans(skb, dev->net);
+
        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
                  skb->len + sizeof(struct ethhdr), skb->protocol);
        memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
 
        skb_totallen = 0;
        pkt_cnt = 0;
+       count = 0;
+       length = 0;
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
                                /* handle previous packets first */
                                break;
                        }
-                       length = skb->len;
+                       count = 1;
+                       length = skb->len - TX_OVERHEAD;
                        skb2 = skb_dequeue(tqp);
                        goto gso_skb;
                }
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        for (count = pos = 0; count < pkt_cnt; count++) {
                skb2 = skb_dequeue(tqp);
                if (skb2) {
+                       length += (skb2->len - TX_OVERHEAD);
                        memcpy(skb->data + pos, skb2->data, skb2->len);
                        pos += roundup(skb2->len, sizeof(u32));
                        dev_kfree_skb(skb2);
                }
        }
 
-       length = skb_totallen;
-
 gso_skb:
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
        entry->urb = urb;
        entry->dev = dev;
        entry->length = length;
+       entry->num_of_packet = count;
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        ret = usb_autopm_get_interface_async(dev->intf);
index f840802159158b56a5b840d7abb83152fcd0c83f..82129eef77748f25ffe05f4754f4f6df81e3e8cd 100644 (file)
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
        int ret;
 
        read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
-       data[0] = 0xc9;
+       data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
                data[1] |= 0x20;        /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
                pkt_len = buf[count - 3] << 8;
                pkt_len += buf[count - 4];
                pkt_len &= 0xfff;
-               pkt_len -= 8;
+               pkt_len -= 4;
        }
 
        /*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
 goon:
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
        if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
        }
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
 try_again:
        status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
                if (res == -ENODEV)
index 30033dbe666263f12ed05cbe7d2fe1e924b156e4..c369db99c005bd69e73217a60bfa87458ebeec9b 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc75xx.h"
 
 #define SMSC_CHIPNAME                  "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc75xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
index 66b3ab9f614eb07edb05757ee333e998f7841f7d..2edc2bc6d1b9fb3201a5f4d564dffed93e30bddc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc95xx.h"
 
 #define SMSC_CHIPNAME                  "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
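
Both smsc hunks now select the MAC address with the same three-step fallback: a devicetree property, then the EEPROM, then a random locally administered address. Distilled sketch (read_eeprom_mac() stands in for the driver-specific EEPROM read and is hypothetical):

    static void init_mac_address(struct usbnet *dev)
    {
            const u8 *mac = of_get_mac_address(dev->udev->dev.of_node);

            if (mac) {                          /* 1. devicetree */
                    memcpy(dev->net->dev_addr, mac, ETH_ALEN);
                    return;
            }
            if (read_eeprom_mac(dev) == 0)      /* 2. EEPROM (hypothetical) */
                    return;
            eth_hw_addr_random(dev->net);       /* 3. random fallback */
    }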
index 8f8793004b9f021c7f689e13dabcf32f10e2c3a0..1b271b99c49eee91a96639ee019d370f61d0cda0 100644 (file)
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        };
        static const int inc[4] = { 0, 100, 0, 0 };
 
+       memset(&mask_m, 0, sizeof(int8_t) * 123);
+       memset(&mask_p, 0, sizeof(int8_t) * 123);
+
        cur_bin = -6000;
        upper = bin + 100;
        lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
        int tmp, new;
        int i;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
                cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
                if (AR_NO_SPUR == cur_bb_spur)
index db6624527d9959d3ffd10a32f3fa76cb0b6dff83..53d7445a5d12c1edaaf012e4e853560ec1f75e39 100644 (file)
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
        int i;
        struct chan_centers centers;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        ath9k_hw_get_channel_centers(ah, chan, &centers);
        freq = centers.synth_center;
 
index 97be104d12030f194a07ab67b5f0e0b8ee15f3e6..b5c57eebf995301e023e683fb672afb6876924b6 100644 (file)
@@ -93,7 +93,7 @@
 #define IWL8260_SMEM_OFFSET            0x400000
 #define IWL8260_SMEM_LEN               0x68000
 
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
        IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
index f899666acb41f44f56a326f5b0bfcff6dce88bde..9e45bf9c60715c523a2af0729713c8726783fa2c 100644 (file)
@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
        snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
                 name_pre, tag);
 
-       /*
-        * Starting 8000B - FW name format has changed. This overwrites the
-        * previous name and uses the new format.
-        */
-       if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
-               if (rev_step != 'A')
-                       snprintf(drv->firmware_name,
-                                sizeof(drv->firmware_name), "%s%c-%s.ucode",
-                                name_pre, rev_step, tag);
-       }
-
        IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
                       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
                                ? "EXPERIMENTAL " : "",
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
-       if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-                !gscan_capa,
-                "GSCAN is supported but capabilities TLV is unavailable\n"))
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, or if it has an old format,
+        * warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           !gscan_capa) {
+               IWL_DEBUG_INFO(drv,
+                              "GSCAN is supported but capabilities TLV is unavailable\n");
                __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
                            capa->_capa);
+       }
 
        return 0;
 
index 4856eac120f60d5eff2e9707e828e541761a76ae..6938cd37be57c6e48ae9efc4d559b58845fd705c 100644 (file)
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
        /* Make room for fw's virtual image pages, if it exists */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block)
                file_len += mvm->num_of_paging_blk *
                        (sizeof(*dump_data) +
                         sizeof(struct iwl_fw_error_dump_paging) +
@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        }
 
        /* Dump fw's virtual image */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block) {
                for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
index 594cd0dc7df937d8b495f9c2864de13ed51f795d..09d895fafaf29dbf2de1cb824c0c02ae068acc96 100644 (file)
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
 
                __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
+               mvm->fw_paging_db[i].fw_paging_block = NULL;
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;
+       mvm->trans->paging_db = NULL;
 
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
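
Besides freeing the paging blocks, the hunk above also clears every pointer that still referenced them, which is what lets the new fw_paging_db[0].fw_paging_block checks in the dump code (previous file) work and makes a repeated free harmless. The idiom in standalone form:

    /* Userspace sketch of the NULL-after-free idiom applied above. */
    #include <stdlib.h>

    struct ctx {
        void *buf;
    };

    static void ctx_free_buf(struct ctx *c)
    {
        free(c->buf);
        c->buf = NULL;   /* free(NULL) is a no-op, so this is now idempotent */
    }

    int main(void)
    {
        struct ctx c = { .buf = malloc(64) };

        ctx_free_buf(&c);
        ctx_free_buf(&c);   /* safe: second call frees NULL */
        return 0;
    }
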
index 05b968506836af92426ee275d8efb35dc78c64cb..79d7cd7d461e49afda911356928b253f3c0ab794 100644 (file)
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
index f798899338eddb6955c06f5b44c26c405dffdade..5101f3ab4f296a2edc0a290fc4ab87a3ba12f9bb 100644 (file)
@@ -397,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
         */
        start += start_pad;
        npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-       if (nd_pfn->mode == PFN_MODE_PMEM)
-               offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+       if (nd_pfn->mode == PFN_MODE_PMEM) {
+               unsigned long memmap_size;
+
+               /*
+                * vmemmap_populate_hugepages() allocates the memmap array in
+                * HPAGE_SIZE chunks.
+                */
+               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+               offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
                        - start;
-       else if (nd_pfn->mode == PFN_MODE_RAM)
+       } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
        else
                goto err;
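
The new sizing rounds the 64-byte-per-page memmap array up to HPAGE_SIZE because vmemmap_populate_hugepages() hands out the memmap in huge-page chunks; reserving only the exact byte count could leave the final chunk spilling past the reservation. A standalone arithmetic check (the 2 MiB HPAGE_SIZE and the 100 MiB namespace size are illustrative assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_4K       4096ULL
    #define HPAGE_SIZE  (2ULL << 20)
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        uint64_t size = 100ULL << 20;        /* 100 MiB namespace */
        uint64_t npfns = size / SZ_4K;       /* 25600 pages */
        uint64_t exact = 64 * npfns;         /* ~1.56 MiB of struct page */
        uint64_t reserved = ALIGN(exact, HPAGE_SIZE);

        /* Before the fix only `exact` was reserved; rounding up covers
         * the allocator's 2 MiB granularity. */
        printf("exact=%llu reserved=%llu slack=%llu\n",
               (unsigned long long)exact,
               (unsigned long long)reserved,
               (unsigned long long)(reserved - exact));
        return 0;
    }
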
index 8ba19bba31569f22c6529a739fa1d30adf5c2f0f..2bb3c5799ac4b0146b2760d8823fb1eb28a6bdaa 100644 (file)
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
        if (ret)
                goto close_banks;
 
-       while (val_size) {
+       while (val_size >= reg_size) {
                if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
                        /* fill up non-data register */
                        *buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
                }
 
                buf++;
-               val_size--;
+               val_size -= reg_size;
                offset += reg_size;
        }
 
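
The loop fix above changes the count from bytes consumed per iteration (one) to bytes actually written per iteration (reg_size, a 4-byte register), so a val_size that is not a multiple of reg_size can no longer drive the loop past the end of the output buffer. The corrected loop shape in a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void read_words(uint32_t *buf, size_t val_size, size_t reg_size)
    {
        uint32_t fake_reg = 0;

        while (val_size >= reg_size) {     /* was: while (val_size) */
            *buf++ = fake_reg++;           /* writes reg_size bytes */
            val_size -= reg_size;          /* was: val_size-- */
        }
    }

    int main(void)
    {
        uint32_t out[4] = { 0 };

        read_words(out, 14, sizeof(uint32_t));  /* 14 bytes -> 3 full words */
        for (size_t i = 0; i < 4; i++)
            printf("out[%zu] = %u\n", i, out[i]);
        return 0;
    }
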
index 96168b8190449ccc1bee6e24ef0758af1bb3c69c..e165b7ce29d7dda18c6d84306a73cdba28818b6c 100644 (file)
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
-       u32 rioid;
+       u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
 
 struct rio_mport_dma_map {
        int valid;
-       uint64_t length;
+       u64 length;
        void *vaddr;
        dma_addr_t paddr;
 };
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
-       unsigned int            event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+       u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                offset += 4;
        }
 
-       if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+                                  buffer, maint_io.length)))
                ret = -EFAULT;
 out:
        vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -ENOMEM;
        length = maint_io.length;
 
-       if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+       if (unlikely(copy_from_user(buffer,
+                       (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }
@@ -360,7 +364,7 @@ out:
  */
 static int
 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
-                                 u32 rioid, u64 raddr, u32 size,
+                                 u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
 {
        struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -394,7 +398,7 @@ err_map_outb:
 
 static int
 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
-                              u32 rioid, u64 raddr, u32 size,
+                              u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
 {
        struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
        dma_addr_t paddr;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
 
        map.handle = paddr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+       if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
 }
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint16_t hdid;
+       u16 hdid;
 
-       if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+       if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;
 
        md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t comptag;
+       u32 comptag;
 
-       if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+       if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;
 
        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
  * @xfer: data transfer descriptor structure
  */
 static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
 {
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
                unsigned long offset;
                long pinned;
 
-               offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+               offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
 
                page_list = kmalloc_array(nr_pages,
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;
 
-       if (transaction.count != 1)
+       if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;
 
        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;
 
-       transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+       transfer = vmalloc(transaction.count * sizeof(*transfer));
        if (!transfer)
                return -ENOMEM;
 
-       if (unlikely(copy_from_user(transfer, transaction.block,
-             transaction.count * sizeof(struct rio_transfer_io)))) {
+       if (unlikely(copy_from_user(transfer,
+                                   (void __user *)(uintptr_t)transaction.block,
+                                   transaction.count * sizeof(*transfer)))) {
                ret = -EFAULT;
                goto out_free;
        }
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                        transaction.sync, dir, &transfer[i]);
 
-       if (unlikely(copy_to_user(transaction.block, transfer,
-             transaction.count * sizeof(struct rio_transfer_io))))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+                                 transfer,
+                                 transaction.count * sizeof(*transfer))))
                ret = -EFAULT;
 
 out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
 }
 
 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
-                       uint64_t size, struct rio_mport_mapping **mapping)
+                       u64 size, struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
        struct rio_mport_mapping *mapping = NULL;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
 
        map.dma_handle = mapping->phys_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);
 
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
 
 static int
 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
-                               u64 raddr, u32 size,
+                               u64 raddr, u64 size,
                                struct rio_mport_mapping **mapping)
 {
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       /* rio_map_inb_region() accepts u32 size */
+       if (size > 0xffffffff)
+               return -EINVAL;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
 
        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
-       ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+       ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;
 
@@ -1288,7 +1298,7 @@ err_dma_alloc:
 
 static int
 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
-                             u64 raddr, u32 size,
+                             u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
 
        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t port_idx = md->mport->index;
+       u32 port_idx = md->mport->index;
 
        rmcd_debug(MPORT, "port_index=%d", port_idx);
 
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
-               if (((db_filter->filter.rioid == 0xffffffff ||
+               if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                      db_filter->filter.rioid == src)) &&
                      info >= db_filter->filter.low &&
                      info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.low > filter.high)
+               return -EINVAL;
+
        spin_lock_irqsave(&priv->md->db_lock, flags);
        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
                if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
                return -EEXIST;
        }
 
-       size = sizeof(struct rio_dev);
+       size = sizeof(*rdev);
        mport = md->mport;
-       destid = (u16)dev_info.destid;
-       hopcount = (u8)dev_info.hopcount;
+       destid = dev_info.destid;
+       hopcount = dev_info.hopcount;
 
        if (rio_mport_read_config_32(mport, destid, hopcount,
                                     RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
                do {
                        rdev = rio_get_comptag(dev_info.comptag, rdev);
                        if (rdev && rdev->dev.parent == &mport->net->dev &&
-                           rdev->destid == (u16)dev_info.destid &&
-                           rdev->hopcount == (u8)dev_info.hopcount)
+                           rdev->destid == dev_info.destid &&
+                           rdev->hopcount == dev_info.hopcount)
                                break;
                } while (rdev);
        }
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
                return maint_port_idx_get(data, (void __user *)arg);
        case RIO_MPORT_GET_PROPERTIES:
                md->properties.hdid = md->mport->host_deviceid;
-               if (copy_to_user((void __user *)arg, &(data->md->properties),
-                                sizeof(data->md->properties)))
+               if (copy_to_user((void __user *)arg, &(md->properties),
+                                sizeof(md->properties)))
                        return -EFAULT;
                return 0;
        case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
        case RIO_DISABLE_PORTWRITE_RANGE:
                return rio_mport_remove_pw_filter(data, (void __user *)arg);
        case RIO_SET_EVENT_MASK:
-               data->event_mask = arg;
+               data->event_mask = (u32)arg;
                return 0;
        case RIO_GET_EVENT_MASK:
                if (copy_to_user((void __user *)arg, &data->event_mask,
-                                   sizeof(data->event_mask)))
+                                   sizeof(u32)))
                        return -EFAULT;
                return 0;
        case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
                        return -EINVAL;
 
                ret = rio_mport_send_doorbell(mport,
-                                             (u16)event.u.doorbell.rioid,
+                                             event.u.doorbell.rioid,
                                              event.u.doorbell.payload);
                if (ret < 0)
                        return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        struct mport_dev *md;
        struct rio_mport_attr attr;
 
-       md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+       md = kzalloc(sizeof(*md), GFP_KERNEL);
        if (!md) {
                rmcd_error("Unable to allocate a device object");
                return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        /* The transfer_mode property will be returned through mport query
         * interface
         */
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
 #else
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
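
A recurring cleanup in this file is replacing sizeof(struct foo) with sizeof(*ptr) in allocations and copy_{to,from}_user() calls, alongside the switch from userspace uintNN_t to kernel uNN types. The sizeof(*ptr) form stays correct if the pointer is ever retyped; a trivial standalone example:

    #include <stdlib.h>

    struct mapping {
        unsigned short rioid;
        unsigned long long rio_addr;
    };

    int main(void)
    {
        /* Size derived from the pointer, not from a repeated type name. */
        struct mapping *map = calloc(1, sizeof(*map));

        if (!map)
            return 1;
        map->rioid = 1;
        free(map);
        return 0;
    }
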
index 5548a31e1a39a100142b45841cbe38e1aa007e38..1fcbb22a4a1c6c7b7530481a164cc9d115648cd2 100644 (file)
@@ -274,7 +274,7 @@ check_spm:
        return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
 }
 
-static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
        .suspend = qcom_idle_enter,
        .init = qcom_cpuidle_init,
 };
index 14718a9ffcfb18d398f8ab5ae2cb3df99d4994db..460c855be0d020bd21d3cc024d0a33d705b0d59d 100644 (file)
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
 
        return retval;
 }
-
-static int usb_port_prepare(struct device *dev)
-{
-       return 1;
-}
 #endif
 
 static const struct dev_pm_ops usb_port_pm_ops = {
 #ifdef CONFIG_PM
        .runtime_suspend =      usb_port_runtime_suspend,
        .runtime_resume =       usb_port_runtime_resume,
-       .prepare =              usb_port_prepare,
 #endif
 };
 
index dcb85e3cd5a76ab40e963f0e6dd402dfb63fa219..479187c32571d7efa9e49dccb3ea7d386102fd13 100644 (file)
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int usb_dev_prepare(struct device *dev)
 {
-       struct usb_device *udev = to_usb_device(dev);
-
-       /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
-       if (udev->do_remote_wakeup != device_may_wakeup(dev))
-               return 0;
-
-       return 1;
+       return 0;               /* Implement eventually? */
 }
 
 static void usb_dev_complete(struct device *dev)
index 5e5a8fa005f8befc1ca9eb4814b12afbf8e3ccea..bc8889956d172b293e6771b2cd7e1e604005b2f1 100644 (file)
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
 {
        usb_phy_generic_register();
        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
-       if (!musb->xceiv) {
+       if (IS_ERR(musb->xceiv)) {
                pr_err("HS UDC: no transceiver configured\n");
-               return -ENODEV;
+               return PTR_ERR(musb->xceiv);
        }
 
        /* Silicon does not implement ConfigData register.
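
usb_get_phy() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old NULL test could never fire; the fix tests IS_ERR() and propagates PTR_ERR(). A userspace re-creation of that encoding (the helpers are re-implemented here, not taken from a kernel header) showing why the NULL check misses:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline bool  IS_ERR(const void *ptr)
    {
        /* Errors live in the top 4095 addresses, not at address 0. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_phy(void) { return ERR_PTR(-ENODEV); }

    int main(void)
    {
        void *phy = get_phy();

        printf("NULL check fires: %s\n", !phy ? "yes" : "no (the old bug)");
        if (IS_ERR(phy))
            printf("IS_ERR fires: error %ld\n", PTR_ERR(phy));
        return 0;
    }
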
index 87bd578799a8a9eb47339177cfc63a5b3bba5a5d..152865b3652295caf52340ca86397d5494f34c84 100644 (file)
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
                musb_writew(epio, MUSB_RXMAXP, 0);
        }
 
-       musb_ep->desc = NULL;
-       musb_ep->end_point.desc = NULL;
-
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);
 
+       musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
+
        schedule_work(&musb->irq_work);
 
        spin_unlock_irqrestore(&(musb->lock), flags);
index 58487a4735218518ce50326750ca8718aaa2a516..2f8ad7f1f482cd55116b8cb3a7fd575fc79c360a 100644 (file)
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",
        .hcd_priv_size          = sizeof(struct musb *),
-       .flags                  = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags                  = HCD_USB2 | HCD_MEMORY,
 
        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
index dd47823bb014c4664fed94181b33fbcd5f8d6e85..7c9f25e9c422a278ae71ea37de6a5c367a6e58cf 100644 (file)
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+       { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
        { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+       { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+       { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
index 5c802d47892c5091c971eaef256310f99b4d175d..ca6bfddaacad2beeb1897a0e397bd5300ccff30d 100644 (file)
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
        const char *name)
 {
        struct virtqueue *vq;
-       void *queue;
+       void *queue = NULL;
        dma_addr_t dma_addr;
        size_t queue_size_in_bytes;
        struct vring vring;
index 9781e0dd59d6fc6a1b02da5e51e516e27ccafad1..d46839f51e730ff50e2c756a9f4b1326af82e2ec 100644 (file)
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
+static void release_memory_resource(struct resource *resource);
+
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                return NULL;
        }
 
+#ifdef CONFIG_SPARSEMEM
+       {
+               unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+               unsigned long pfn = res->start >> PAGE_SHIFT;
+
+               if (pfn > limit) {
+                       pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+                              pfn, limit);
+                       release_memory_resource(res);
+                       return NULL;
+               }
+       }
+#endif
+
        return res;
 }
 
index 38272ad245516c272ab571bed1a5c962f4a0e808..f4edd6df3df235c55aef1329cb8e17b5ae8cf9f7 100644 (file)
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
 {
        unsigned int new_size;
        evtchn_port_t *new_ring, *old_ring;
-       unsigned int p, c;
 
        /*
         * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
        /*
         * Copy the old ring contents to the new ring.
         *
-        * If the ring contents crosses the end of the current ring,
-        * it needs to be copied in two chunks.
+        * To take care of wrapping, a full ring, and the new index
+        * pointing into the second half, simply copy the old contents
+        * twice.
         *
         * +---------+    +------------------+
-        * |34567  12| -> |       1234567    |
-        * +-----p-c-+    +------------------+
+        * |34567  12| -> |34567  1234567  12|
+        * +-----p-c-+    +-------c------p---+
         */
-       p = evtchn_ring_offset(u, u->ring_prod);
-       c = evtchn_ring_offset(u, u->ring_cons);
-       if (p < c) {
-               memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
-               memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
-       } else
-               memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+       memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+       memcpy(new_ring + u->ring_size, old_ring,
+              u->ring_size * sizeof(*u->ring));
 
        u->ring = new_ring;
        u->ring_size = new_size;
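
The rewrite works because the ring size is a power of two, the new ring is exactly twice the old one, and consumer/producer indices are free-running and only masked on access: copying the old contents twice guarantees that every index window of at most ring_size entries lands on valid data, wrapped or not. A standalone model of the trick:

    #include <stdio.h>
    #include <string.h>

    #define OLD 8
    #define NEW (2 * OLD)

    int main(void)
    {
        unsigned int old_ring[OLD] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned int new_ring[NEW];
        unsigned int cons = 6, prod = 6 + OLD;   /* full ring, wrapped window */

        memcpy(new_ring, old_ring, sizeof(old_ring));
        memcpy(new_ring + OLD, old_ring, sizeof(old_ring));

        /* Every entry between cons and prod is readable with the same
         * free-running indices, now masked by the larger size. */
        for (unsigned int i = cons; i != prod; i++)
            printf("%u ", new_ring[i & (NEW - 1)]);
        printf("\n");
        return 0;
    }
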
index 719924d6c7062bf9af20a0ecd561a77841954b17..dcad5e2105252fa277a1c66ef83d8a960d2a082b 100644 (file)
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 
        *nbytesp = nbytes;
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
index c524fdddc7fb1f601d6d06ddb46d0affcca89c08..99899705b1055411723c3e5919ecbc1f50139e14 100644 (file)
@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
 
 /* all accesses are serialized by namespace_sem */
 static struct user_namespace *user_ns;
-static struct mount *last_dest, *last_source, *dest_master;
+static struct mount *last_dest, *first_source, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
 
@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
+               bool done;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
-                       if (p == dest_master || IS_MNT_MARKED(p)) {
-                               while (last_dest->mnt_master != p) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
-                               if (!peers(n, last_dest)) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
+                       if (p == dest_master || IS_MNT_MARKED(p))
                                break;
-                       }
                }
+               do {
+                       struct mount *parent = last_source->mnt_parent;
+                       if (last_source == first_source)
+                               break;
+                       done = parent->mnt_master == p;
+                       if (done && peers(n, parent))
+                               break;
+                       last_source = last_source->mnt_master;
+               } while (!done);
+
                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
         */
        user_ns = current->nsproxy->mnt_ns->user_ns;
        last_dest = dest_mnt;
+       first_source = source_mnt;
        last_source = source_mnt;
        mp = dest_mp;
        list = tree_list;
index b1755b23893e5e34513582d51d7a9cf9ed6b7227..92e37e224cd22d66d0b9675f1790e110d5e7b415 100644 (file)
@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        struct mm_struct *mm = file->private_data;
        unsigned long env_start, env_end;
 
-       if (!mm)
+       /* Ensure the process spawned far enough to have an environment. */
+       if (!mm || !mm->env_end)
                return 0;
 
        page = (char *)__get_free_page(GFP_TEMPORARY);
index fa92fe839fda2f989e3d52c368cc7520b80679e5..36661acaf33b4f61d27f8f48d467ced432521b3c 100644 (file)
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 #endif
        }
 
-       ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+       ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
        if (ret < 0)
                goto out_bh;
 
        strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
        udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-       ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+       ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
        if (ret < 0)
                goto out_bh;
 
index 972b70625614f837310e39d41f6d8e836d205144..263829ef1873644a16ac3340555291a7fc240a4a 100644 (file)
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
index 3ff42f4437f3eb3374fea6b1cd0e839b71b65577..695389a4fc239f245cfacfa5a1e2fde9eae5b4a7 100644 (file)
@@ -335,9 +335,21 @@ try_again:
        return u_len;
 }
 
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+                     const uint8_t *ocu_i, int i_len)
 {
-       return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+       int s_len = 0;
+
+       if (i_len > 0) {
+               s_len = ocu_i[i_len - 1];
+               if (s_len >= i_len) {
+                       pr_err("incorrect dstring lengths (%d/%d)\n",
+                              s_len, i_len);
+                       return -EINVAL;
+               }
+       }
+
+       return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
                                 udf_uni2char_utf8, 0);
 }
 
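
A UDF dstring is a fixed-size field whose last byte records how many of the preceding bytes are meaningful; the new wrapper validates that length against the field size before handing it to udf_name_from_CS0(), so corrupted on-disk metadata is rejected instead of over-read. The layout and check in a standalone sketch:

    #include <stdio.h>

    static int dstring_len(const unsigned char *field, int field_len)
    {
        int s_len = 0;

        if (field_len > 0) {
            s_len = field[field_len - 1];
            if (s_len >= field_len)
                return -1;   /* corrupt: payload would overlap the length byte */
        }
        return s_len;
    }

    int main(void)
    {
        unsigned char vol[32] = "LinuxUDF";

        vol[31] = 8;                          /* 8 payload bytes */
        printf("payload length: %d\n", dstring_len(vol, sizeof(vol)));

        vol[31] = 200;                        /* impossible for a 32-byte field */
        printf("corrupt field: %d\n", dstring_len(vol, sizeof(vol)));
        return 0;
    }
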
index 14362a84c78e3ca1d5712d284d6dce22ed656ba1..3a932501d69078951ee4e70c1acd4ae429ca2a85 100644 (file)
@@ -394,13 +394,13 @@ struct acpi_data_node {
 
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && (fwnode->type == FWNODE_ACPI
+       return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
                || fwnode->type == FWNODE_ACPI_DATA);
 }
 
 static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_ACPI;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
 }
 
 static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
index 21ee41b92e8aaad2030f0d86c614dc4bfc4b5c5a..f1d5c5acc8dd58cd92df76915391b4737d90c083 100644 (file)
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
deleted file mode 100644 (file)
index 0414009..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2014 Marvell
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CPUFREQ_DT_H__
-#define __CPUFREQ_DT_H__
-
-struct cpufreq_dt_platform_data {
-       /*
-        * True when each CPU has its own clock to control its
-        * frequency, false when all CPUs are controlled by a single
-        * clock.
-        */
-       bool independent_clocks;
-};
-
-#endif /* __CPUFREQ_DT_H__ */
index 718e8725de8aa4d1d13004af368c8585e4f655a4..4e81e08db7522f12b0972c56739474a4f4825ee8 100644 (file)
@@ -102,6 +102,17 @@ struct cpufreq_policy {
         */
        struct rw_semaphore     rwsem;
 
+       /*
+        * Fast switch flags:
+        * - fast_switch_possible should be set by the driver if it can
+        *   guarantee that the frequency can be changed on any CPU sharing the
+        *   policy and that the change will then affect all of the policy CPUs.
+        * - fast_switch_enabled is to be set by governors that support fast
+        *   frequency switching with the help of cpufreq_enable_fast_switch().
+        */
+       bool                    fast_switch_possible;
+       bool                    fast_switch_enabled;
+
        /* Synchronization for frequency transitions */
        bool                    transition_ongoing; /* Tracks transition status */
        spinlock_t              transition_lock;
@@ -156,6 +167,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
 #else
 static inline unsigned int cpufreq_get(unsigned int cpu)
 {
@@ -236,6 +249,8 @@ struct cpufreq_driver {
                                  unsigned int relation);       /* Deprecated */
        int             (*target_index)(struct cpufreq_policy *policy,
                                        unsigned int index);
+       unsigned int    (*fast_switch)(struct cpufreq_policy *policy,
+                                      unsigned int target_freq);
        /*
         * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
         * unset.
@@ -426,6 +441,20 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
 #define CPUFREQ_POLICY_POWERSAVE       (1)
 #define CPUFREQ_POLICY_PERFORMANCE     (2)
 
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * ondemand governor will work on any processor with transition latency <= 10ms,
+ * using appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * the ondemand governor will not work. All times here are in us (microseconds).
+ */
+#define MIN_SAMPLING_RATE_RATIO                (2)
+#define LATENCY_MULTIPLIER             (1000)
+#define MIN_LATENCY_MULTIPLIER         (20)
+#define TRANSITION_LATENCY_LIMIT       (10 * 1000 * 1000)
+
 /* Governor Events */
 #define CPUFREQ_GOV_START      1
 #define CPUFREQ_GOV_STOP       2
@@ -450,6 +479,8 @@ struct cpufreq_governor {
 };
 
 /* Pass a target to the cpufreq driver */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+                                       unsigned int target_freq);
 int cpufreq_driver_target(struct cpufreq_policy *policy,
                                 unsigned int target_freq,
                                 unsigned int relation);
@@ -462,6 +493,29 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 struct cpufreq_governor *cpufreq_default_governor(void);
 struct cpufreq_governor *cpufreq_fallback_governor(void);
 
+/* Governor attribute set */
+struct gov_attr_set {
+       struct kobject kobj;
+       struct list_head policy_list;
+       struct mutex update_lock;
+       int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+       ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+                        size_t count);
+};
+
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
index 1afde47e1528c36ad8613c2e56fb23d1f614e476..79c52fa81cac9ea2dd3a1bea6e1093cd81fdb79c 100644 (file)
 #error Wordsize not 32 or 64
 #endif
 
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
 static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
-       hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_64;
 #else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
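
The replacement constants are fractional representations of 1 - phi = (3 - sqrt(5))/2 ~ 0.382, so multiplying by them scrambles consecutive keys into well-spread high bits, which hash_64() then extracts with a shift. A quick numeric check and a 32-bit variant (standalone; compile with -lm):

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    #define GOLDEN_RATIO_32 0x61C88647u

    static uint32_t hash_32(uint32_t val, unsigned bits)
    {
        /* Multiply, then keep the best-mixed (high) bits. */
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
    }

    int main(void)
    {
        double frac = (3.0 - sqrt(5.0)) / 2.0;

        printf("2^32 * (3-sqrt(5))/2 = %.0f (constant = %u)\n",
               4294967296.0 * frac, GOLDEN_RATIO_32);
        for (uint32_t k = 0; k < 4; k++)       /* consecutive keys scatter */
            printf("hash_32(%u, 8) = %u\n", k, hash_32(k, 8));
        return 0;
    }
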
index d5569734f6724d6194497fe9842e9aac56ef0b09..548fd535fd02399634bace0607471d07b973d8dc 100644 (file)
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
index 49175e4ced11288c535eb41cf967683858d6bc3b..f840d77c6c313cdeecad084e366a762b00512319 100644 (file)
@@ -246,7 +246,15 @@ do {                                                               \
        net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)                         \
        net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...)                                  \
+do {                                                                   \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
+           net_ratelimit())                                            \
+               __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
+} while (0)
+#elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
        net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
 #else
index 8395308a24456028f22621c5e00f757c4c817576..b3c46b019ac1435c9da1d6c308b14b3ad849d279 100644 (file)
@@ -4004,7 +4004,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-       netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+       netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 
        /* check flags correspondence */
        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
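
The cast fix matters because gso_type is a 32-bit int while netdev_features_t is 64 bits wide: without widening first, bits shifted past position 31 are lost before the assignment, so GSO flags in the upper half of the feature word silently vanish. A standalone demonstration using unsigned arithmetic (illustrative values, not the kernel's actual flag layout):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int gso_type = 1u << 12;   /* a flag in the upper GSO range */
        unsigned int shift = 20;            /* bit 12 + 20 = bit 32: past 32 bits */

        uint64_t wrong = (uint64_t)(gso_type << shift); /* 32-bit shift truncates: 0 */
        uint64_t right = (uint64_t)gso_type << shift;   /* widen first: 0x100000000 */

        printf("wrong = %#llx\n", (unsigned long long)wrong);
        printf("right = %#llx\n", (unsigned long long)right);
        return 0;
    }
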
index 7fcb681baadf172824017d067721c65189638565..31758036787c3a490bb4aa31dda27b8a12be48da 100644 (file)
@@ -133,7 +133,7 @@ void of_core_init(void);
 
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_OF;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
 }
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
index f4ed4f1b0c77ac84f65a4586502863fd78448063..6b052aa7b5b79de6712ee36fe3a424f6f93bd548 100644 (file)
@@ -516,6 +516,27 @@ static inline int PageTransCompound(struct page *page)
        return PageCompound(page);
 }
 
+/*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to be called only while
+ * split_huge_pmd() cannot run from under us, like if protected by the
+ * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+       return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
 /*
  * PageTransTail returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
 #else
 TESTPAGEFLAG_FALSE(TransHuge)
 TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
 TESTPAGEFLAG_FALSE(TransTail)
 TESTPAGEFLAG_FALSE(DoubleMap)
        TESTSETFLAG_FALSE(DoubleMap)
index cccaf4a29e9f02c9a60b65f73a523a69efa5af3a..bca26157f5b695fa1d2b6381800ad660faf972f5 100644 (file)
@@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev);
 int dev_pm_opp_set_regulator(struct device *dev, const char *name);
 void dev_pm_opp_put_regulator(struct device *dev);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                        unsigned long freq, bool available)
 {
-       return ERR_PTR(-EINVAL);
+       return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                        unsigned long *freq)
 {
-       return ERR_PTR(-EINVAL);
+       return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                        unsigned long *freq)
 {
-       return ERR_PTR(-EINVAL);
+       return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
                                        unsigned long u_volt)
 {
-       return -EINVAL;
+       return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
                                                        struct device *dev)
 {
-       return ERR_PTR(-EINVAL);
+       return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_set_supported_hw(struct device *dev,
                                              const u32 *versions,
                                              unsigned int count)
 {
-       return -EINVAL;
+       return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
 
 static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
-       return -EINVAL;
+       return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
 static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
 {
-       return -EINVAL;
+       return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_regulator(struct device *dev) {}
 
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
+{
+       return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
        return -EINVAL;
 }
 
+static inline void dev_pm_opp_remove_table(struct device *dev)
+{
+}
+
+static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+}
+
 #endif         /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int dev_pm_opp_of_add_table(struct device *dev);
 void dev_pm_opp_of_remove_table(struct device *dev);
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
-       return -EINVAL;
+       return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_of_remove_table(struct device *dev)
 {
 }
 
-static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
-       return -ENOSYS;
-}
-
-static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
 {
+       return -ENOTSUPP;
 }
 
-static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
 {
-       return -ENOSYS;
 }
 
-static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-       return -ENOSYS;
+       return -ENOTSUPP;
 }
 #endif
 
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
deleted file mode 100644 (file)
index b65d19d..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Integrated Device Technology Inc.
- * Copyright (c) 2015, Prodrive Technologies
- * Copyright (c) 2015, Texas Instruments Incorporated
- * Copyright (c) 2015, RapidIO Trade Association
- * All rights reserved.
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License(GPL) Version 2, or the BSD-3 Clause license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its contributors
- * may be used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RIO_MPORT_CDEV_H_
-#define _RIO_MPORT_CDEV_H_
-
-#ifndef __user
-#define __user
-#endif
-
-struct rio_mport_maint_io {
-       uint32_t rioid;         /* destID of remote device */
-       uint32_t hopcount;      /* hopcount to remote device */
-       uint32_t offset;        /* offset in register space */
-       size_t length;          /* length in bytes */
-       void __user *buffer;    /* data buffer */
-};
-
-/*
- * Definitions for RapidIO data transfers:
- * - memory mapped (MAPPED)
- * - packet generation from memory (TRANSFER)
- */
-#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
-#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
-#define RIO_CAP_DBL_SEND               (1 << 2)
-#define RIO_CAP_DBL_RECV               (1 << 3)
-#define RIO_CAP_PW_SEND                        (1 << 4)
-#define RIO_CAP_PW_RECV                        (1 << 5)
-#define RIO_CAP_MAP_OUTB               (1 << 6)
-#define RIO_CAP_MAP_INB                        (1 << 7)
-
-struct rio_mport_properties {
-       uint16_t hdid;
-       uint8_t id;                     /* Physical port ID */
-       uint8_t  index;
-       uint32_t flags;
-       uint32_t sys_size;              /* Default addressing size */
-       uint8_t  port_ok;
-       uint8_t  link_speed;
-       uint8_t  link_width;
-       uint32_t dma_max_sge;
-       uint32_t dma_max_size;
-       uint32_t dma_align;
-       uint32_t transfer_mode;         /* Default transfer mode */
-       uint32_t cap_sys_size;          /* Capable system sizes */
-       uint32_t cap_addr_size;         /* Capable addressing sizes */
-       uint32_t cap_transfer_mode;     /* Capable transfer modes */
-       uint32_t cap_mport;             /* Mport capabilities */
-};
-
-/*
- * Definitions for RapidIO events;
- * - incoming port-writes
- * - incoming doorbells
- */
-#define RIO_DOORBELL   (1 << 0)
-#define RIO_PORTWRITE  (1 << 1)
-
-struct rio_doorbell {
-       uint32_t rioid;
-       uint16_t payload;
-};
-
-struct rio_doorbell_filter {
-       uint32_t rioid;                 /* 0xffffffff to match all ids */
-       uint16_t low;
-       uint16_t high;
-};
-
-
-struct rio_portwrite {
-       uint32_t payload[16];
-};
-
-struct rio_pw_filter {
-       uint32_t mask;
-       uint32_t low;
-       uint32_t high;
-};
-
-/* RapidIO base address for inbound requests set to value defined below
- * indicates that no specific RIO-to-local address translation is requested
- * and driver should use direct (one-to-one) address mapping.
-*/
-#define RIO_MAP_ANY_ADDR       (uint64_t)(~((uint64_t) 0))
-
-struct rio_mmap {
-       uint32_t rioid;
-       uint64_t rio_addr;
-       uint64_t length;
-       uint64_t handle;
-       void *address;
-};
-
-struct rio_dma_mem {
-       uint64_t length;                /* length of DMA memory */
-       uint64_t dma_handle;            /* handle associated with this memory */
-       void *buffer;                   /* pointer to this memory */
-};
-
-
-struct rio_event {
-       unsigned int header;    /* event type RIO_DOORBELL or RIO_PORTWRITE */
-       union {
-               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
-               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
-       } u;
-};
-
-enum rio_transfer_sync {
-       RIO_TRANSFER_SYNC,      /* synchronous transfer */
-       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
-       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
-};
-
-enum rio_transfer_dir {
-       RIO_TRANSFER_DIR_READ,  /* Read operation */
-       RIO_TRANSFER_DIR_WRITE, /* Write operation */
-};
-
-/*
- * RapidIO data exchange transactions are lists of individual transfers. Each
- * transfer exchanges data between two RapidIO devices by remote direct memory
- * access and has its own completion code.
- *
- * The RapidIO specification defines four types of data exchange requests:
- * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
- * to specify the required type of write operation or combination of them when
- * only the last data packet requires response.
- *
- * NREAD:    read up to 256 bytes from remote device memory into local memory
- * NWRITE:   write up to 256 bytes from local memory to remote device memory
- *           without confirmation
- * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
- * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
- *
- * The default exchange is chosen from NREAD and any of the WRITE modes as the
- * driver sees fit. For write requests the user can explicitly choose between
- * any of the write modes for each transaction.
- */
-enum rio_exchange {
-       RIO_EXCHANGE_DEFAULT,   /* Default method */
-       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
-       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
-       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
-       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
-       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
-};
-
-struct rio_transfer_io {
-       uint32_t rioid;                 /* Target destID */
-       uint64_t rio_addr;              /* Address in target's RIO mem space */
-       enum rio_exchange method;       /* Data exchange method */
-       void __user *loc_addr;
-       uint64_t handle;
-       uint64_t offset;                /* Offset in buffer */
-       uint64_t length;                /* Length in bytes */
-       uint32_t completion_code;       /* Completion code for this transfer */
-};
-
-struct rio_transaction {
-       uint32_t transfer_mode;         /* Data transfer mode */
-       enum rio_transfer_sync sync;    /* Synchronization method */
-       enum rio_transfer_dir dir;      /* Transfer direction */
-       size_t count;                   /* Number of transfers */
-       struct rio_transfer_io __user *block;   /* Array of <count> transfers */
-};
-
-struct rio_async_tx_wait {
-       uint32_t token;         /* DMA transaction ID token */
-       uint32_t timeout;       /* Wait timeout in msec, if 0 use default TO */
-};
-
-#define RIO_MAX_DEVNAME_SZ     20
-
-struct rio_rdev_info {
-       uint32_t destid;
-       uint8_t hopcount;
-       uint32_t comptag;
-       char name[RIO_MAX_DEVNAME_SZ + 1];
-};
-
-/* Driver IOCTL codes */
-#define RIO_MPORT_DRV_MAGIC           'm'
-
-#define RIO_MPORT_MAINT_HDID_SET       \
-       _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
-#define RIO_MPORT_MAINT_COMPTAG_SET    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
-#define RIO_MPORT_MAINT_PORT_IDX_GET   \
-       _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
-#define RIO_MPORT_GET_PROPERTIES \
-       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
-#define RIO_MPORT_MAINT_READ_LOCAL \
-       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_LOCAL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_READ_REMOTE \
-       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_REMOTE \
-       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
-#define RIO_ENABLE_DOORBELL_RANGE      \
-       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
-#define RIO_DISABLE_DOORBELL_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
-#define RIO_ENABLE_PORTWRITE_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
-#define RIO_DISABLE_PORTWRITE_RANGE    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
-#define RIO_SET_EVENT_MASK             \
-       _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
-#define RIO_GET_EVENT_MASK             \
-       _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
-#define RIO_MAP_OUTBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
-#define RIO_UNMAP_OUTBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
-#define RIO_MAP_INBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
-#define RIO_UNMAP_INBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
-#define RIO_ALLOC_DMA \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
-#define RIO_FREE_DMA \
-       _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
-#define RIO_TRANSFER \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
-#define RIO_WAIT_FOR_ASYNC \
-       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
-#define RIO_DEV_ADD \
-       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
-#define RIO_DEV_DEL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
-
-#endif /* _RIO_MPORT_CDEV_H_ */
index 52c4847b05e2882a72d04c3c75fc4d55c2b4a6b9..8344e1947eec392c3c3731f0dd81bbe71044fb3c 100644 (file)
@@ -3240,7 +3240,10 @@ struct update_util_data {
                     u64 time, unsigned long util, unsigned long max);
 };
 
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+                                    unsigned long util, unsigned long max));
+void cpufreq_remove_update_util_hook(int cpu);
 #endif /* CONFIG_CPU_FREQ */
 
 #endif
index 2b83359c19cabee6dc211c887518d001f05081f4..0a4cd4703f403f0d65f867aaa6dcc3e128fcde05 100644 (file)
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
+       /* Cgroup2 doesn't have per-cgroup swappiness */
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return vm_swappiness;
+
        /* root ? */
        if (mem_cgroup_disabled() || !memcg->css.parent)
                return vm_swappiness;
index 73ed2e951c020d28c21457d907c55764ea7b3d88..35437c779da8d6d6d113ed0deedbd39428852238 100644 (file)
@@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
            (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
             skb->inner_protocol != htons(ETH_P_TEB) ||
             (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+            (skb->ip_summed != CHECKSUM_NONE &&
+             !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
index 6e0f5f01734cb893bfec236bd39bb012d333e510..c51afb71bfabc658b16be2773ad5a0e78ca1d09b 100644 (file)
@@ -718,9 +718,9 @@ __SYSCALL(__NR_mlock2, sys_mlock2)
 #define __NR_copy_file_range 285
 __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 #define __NR_preadv2 286
-__SYSCALL(__NR_preadv2, sys_preadv2)
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 287
-__SYSCALL(__NR_pwritev2, sys_pwritev2)
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
 
 #undef __NR_syscalls
 #define __NR_syscalls 288
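
Switching these two entries from __SYSCALL to __SC_COMP wires up the compat entry points for 32-bit tasks on 64-bit kernels, which preadv2/pwritev2 need since compat callers pass 32-bit iovecs and a split 64-bit offset. For reference, the asm-generic dispatch macro is conventionally defined along these lines (paraphrased from memory, not part of this hunk):

#ifdef __SYSCALL_COMPAT
#define __SC_COMP(_nr, _sys, _comp)	__SYSCALL(_nr, _comp)
#else
#define __SC_COMP(_nr, _sys, _comp)	__SYSCALL(_nr, _sys)
#endif
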
diff --git a/include/uapi/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
new file mode 100644 (file)
index 0000000..5796bf1
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct rio_mport_maint_io {
+       __u16 rioid;            /* destID of remote device */
+       __u8  hopcount;         /* hopcount to remote device */
+       __u8  pad0[5];
+       __u32 offset;           /* offset in register space */
+       __u32 length;           /* length in bytes */
+       __u64 buffer;           /* pointer to data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
+#define RIO_CAP_DBL_SEND               (1 << 2)
+#define RIO_CAP_DBL_RECV               (1 << 3)
+#define RIO_CAP_PW_SEND                        (1 << 4)
+#define RIO_CAP_PW_RECV                        (1 << 5)
+#define RIO_CAP_MAP_OUTB               (1 << 6)
+#define RIO_CAP_MAP_INB                        (1 << 7)
+
+struct rio_mport_properties {
+       __u16 hdid;
+       __u8  id;                       /* Physical port ID */
+       __u8  index;
+       __u32 flags;
+       __u32 sys_size;         /* Default addressing size */
+       __u8  port_ok;
+       __u8  link_speed;
+       __u8  link_width;
+       __u8  pad0;
+       __u32 dma_max_sge;
+       __u32 dma_max_size;
+       __u32 dma_align;
+       __u32 transfer_mode;            /* Default transfer mode */
+       __u32 cap_sys_size;             /* Capable system sizes */
+       __u32 cap_addr_size;            /* Capable addressing sizes */
+       __u32 cap_transfer_mode;        /* Capable transfer modes */
+       __u32 cap_mport;                /* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events:
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL   (1 << 0)
+#define RIO_PORTWRITE  (1 << 1)
+
+struct rio_doorbell {
+       __u16 rioid;
+       __u16 payload;
+};
+
+struct rio_doorbell_filter {
+       __u16 rioid;    /* Use RIO_INVALID_DESTID to match all ids */
+       __u16 low;
+       __u16 high;
+       __u16 pad0;
+};
+
+
+struct rio_portwrite {
+       __u32 payload[16];
+};
+
+struct rio_pw_filter {
+       __u32 mask;
+       __u32 low;
+       __u32 high;
+       __u32 pad0;
+};
+
+/* Setting the RapidIO base address for inbound requests to the value defined
+ * below indicates that no specific RIO-to-local address translation is
+ * requested and that the driver should use direct (one-to-one) address
+ * mapping.
+ */
+#define RIO_MAP_ANY_ADDR       (__u64)(~((__u64) 0))
+
+struct rio_mmap {
+       __u16 rioid;
+       __u16 pad0[3];
+       __u64 rio_addr;
+       __u64 length;
+       __u64 handle;
+       __u64 address;
+};
+
+struct rio_dma_mem {
+       __u64 length;           /* length of DMA memory */
+       __u64 dma_handle;       /* handle associated with this memory */
+       __u64 address;
+};
+
+struct rio_event {
+       __u32 header;   /* event type RIO_DOORBELL or RIO_PORTWRITE */
+       union {
+               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
+               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+       } u;
+       __u32 pad0;
+};
+
+enum rio_transfer_sync {
+       RIO_TRANSFER_SYNC,      /* synchronous transfer */
+       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
+       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+       RIO_TRANSFER_DIR_READ,  /* Read operation */
+       RIO_TRANSFER_DIR_WRITE, /* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface
+ * allows the required type of write operation, or a combination of them, to
+ * be specified when only the last data packet requires a response.
+ *
+ * NREAD:    read up to 256 bytes from remote device memory into local memory
+ * NWRITE:   write up to 256 bytes from local memory to remote device memory
+ *           without confirmation
+ * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+       RIO_EXCHANGE_DEFAULT,   /* Default method */
+       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
+       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
+       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
+       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
+       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+       __u64 rio_addr; /* Address in target's RIO mem space */
+       __u64 loc_addr;
+       __u64 handle;
+       __u64 offset;   /* Offset in buffer */
+       __u64 length;   /* Length in bytes */
+       __u16 rioid;    /* Target destID */
+       __u16 method;   /* Data exchange method, one of rio_exchange enum */
+       __u32 completion_code;  /* Completion code for this transfer */
+};
+
+struct rio_transaction {
+       __u64 block;    /* Pointer to array of <count> transfers */
+       __u32 count;    /* Number of transfers */
+       __u32 transfer_mode;    /* Data transfer mode */
+       __u16 sync;     /* Synch method, one of rio_transfer_sync enum */
+       __u16 dir;      /* Transfer direction, one of rio_transfer_dir enum */
+       __u32 pad0;
+};
+
+struct rio_async_tx_wait {
+       __u32 token;    /* DMA transaction ID token */
+       __u32 timeout;  /* Wait timeout in msec; if 0, use the default timeout */
+};
+
+#define RIO_MAX_DEVNAME_SZ     20
+
+struct rio_rdev_info {
+       __u16 destid;
+       __u8 hopcount;
+       __u8 pad0;
+       __u32 comptag;
+       char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC           'm'
+
+#define RIO_MPORT_MAINT_HDID_SET       \
+       _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
+#define RIO_MPORT_MAINT_COMPTAG_SET    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
+#define RIO_MPORT_MAINT_PORT_IDX_GET   \
+       _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
+#define RIO_MPORT_GET_PROPERTIES \
+       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE      \
+       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK             \
+       _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
+#define RIO_GET_EVENT_MASK             \
+       _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
+#define RIO_MAP_OUTBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
+#define RIO_ALLOC_DMA \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+       _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
+#define RIO_TRANSFER \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
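
The move to include/uapi with fixed-width __u* types and explicit pad fields gives this ABI a single structure layout for 32- and 64-bit userspace. A rough sketch of consuming it from userspace; the /dev/rio_mport0 node name is an assumption based on the driver's usual naming, not something this header defines:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties props;
	int fd = open("/dev/rio_mport0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* Query the master port's static properties and capabilities. */
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
		printf("mport id %u, link width %u\n", props.id, props.link_width);

	close(fd);
	return 0;
}
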
index 3f10e5317b46b3858479b2af949d5a719bc72405..8f3a8f606fd95b723d9ed26e179c8a741250717f 100644 (file)
@@ -45,9 +45,7 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP16__
-       return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
        return __arch_swab16(val);
 #else
        return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP32__
-       return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
        return __arch_swab32(val);
 #else
        return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP64__
-       return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
        return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
        __u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
  * __swab16 - return a byteswapped 16-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
 #define __swab16(x)                            \
        (__builtin_constant_p((__u16)(x)) ?     \
        ___constant_swab16(x) :                 \
        __fswab16(x))
+#endif
 
 /**
  * __swab32 - return a byteswapped 32-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
 #define __swab32(x)                            \
        (__builtin_constant_p((__u32)(x)) ?     \
        ___constant_swab32(x) :                 \
        __fswab32(x))
+#endif
 
 /**
  * __swab64 - return a byteswapped 64-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
 #define __swab64(x)                            \
        (__builtin_constant_p((__u64)(x)) ?     \
        ___constant_swab64(x) :                 \
        __fswab64(x))
+#endif
 
 /**
  * __swahw32 - return a word-swapped 32-bit value
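
The effect of hoisting the __builtin_bswap*() dispatch from the __fswab*() helpers into the __swab*() macros themselves is that, where the builtin is available, it is applied uniformly at the macro level instead of being reached through __builtin_constant_p() and an inline function. A small sketch of what callers see:

#include <linux/swab.h>

/* Constant argument: with __HAVE_BUILTIN_BSWAP32__ defined, the compiler
 * folds __builtin_bswap32(0x12345678) to 0x78563412 at compile time. */
static const u32 wire_magic = __swab32(0x12345678);

/* Variable argument: compiles to a single byte-swap instruction where the
 * builtin exists; otherwise it still falls back to __fswab32(). */
u32 cpu_to_wire(u32 v)
{
	return __swab32(v);
}
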
index 96294ac937552178531266cb16ce04185e47c99a..9dc46cb8a0fd79be7f4bdae7ae8f5f4dbdcdb7f4 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #define xen_pfn_to_page(xen_pfn)       \
-       ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+       (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
 #define page_to_xen_pfn(page)          \
-       (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+       ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
 
 #define XEN_PFN_PER_PAGE       (PAGE_SIZE / XEN_PAGE_SIZE)
 
index f2ece3c174a5b540b40492833048d6c8bd9f3f1c..8f94ca1860cfdcdd9509e40d40c3ad7cbf572e2d 100644 (file)
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
        switch (type) {
        case BPF_TYPE_PROG:
-               atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+               raw = bpf_prog_inc(raw);
                break;
        case BPF_TYPE_MAP:
-               bpf_map_inc(raw, true);
+               raw = bpf_map_inc(raw, true);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
                goto out;
 
        raw = bpf_any_get(inode->i_private, *type);
-       touch_atime(&path);
+       if (!IS_ERR(raw))
+               touch_atime(&path);
 
        path_put(&path);
        return raw;
index adc5e4bd74f8bc3310cfe4b75257647886b2e11f..cf5e9f7ad13ad13ebb5b6bc5a775beca834c7c86 100644 (file)
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
        return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-       atomic_inc(&map->refcnt);
+       if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&map->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
        if (uref)
                atomic_inc(&map->usercnt);
+       return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
        if (IS_ERR(map))
                return map;
 
-       bpf_map_inc(map, true);
+       map = bpf_map_inc(map, true);
        fdput(f);
 
        return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
        return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+       if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&prog->aux->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
+       return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
        if (IS_ERR(prog))
                return prog;
 
-       atomic_inc(&prog->aux->refcnt);
+       prog = bpf_prog_inc(prog);
        fdput(f);
 
        return prog;
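
The bpf_map_inc()/bpf_prog_inc() pattern above — increment, then back off and fail if the count crossed a hard cap — is a generic guard against userspace driving a refcount to overflow. A distilled sketch of the idiom, with hypothetical names:

#include <linux/atomic.h>
#include <linux/err.h>

#define EXAMPLE_MAX_REFCNT	32768

struct example_obj {
	atomic_t refcnt;
};

/* Returns the object on success, ERR_PTR(-EBUSY) once the cap is hit. */
static struct example_obj *example_obj_get(struct example_obj *obj)
{
	if (atomic_inc_return(&obj->refcnt) > EXAMPLE_MAX_REFCNT) {
		atomic_dec(&obj->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return obj;
}

Returning the pointer (or an ERR_PTR) instead of void is what forces every caller — bpf_any_get(), bpf_map_get_with_uref(), the verifier — to handle the failure, which is why the hunks above thread the return value through.
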
index db2574e7b8b008395bb8fee53baf513fdc56c392..c5c17a62f509f67385cfb9a587e702646cc19b80 100644 (file)
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = {
        [CONST_IMM]             = "imm",
 };
 
-static const struct {
-       int map_type;
-       int func_id;
-} func_limit[] = {
-       {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-       {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
 static void print_verifier_state(struct verifier_env *env)
 {
        enum bpf_reg_type t;
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-       bool bool_map, bool_func;
-       int i;
-
        if (!map)
                return 0;
 
-       for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-               bool_map = (map->map_type == func_limit[i].map_type);
-               bool_func = (func_id == func_limit[i].func_id);
-               /* only when map & func pair match it can continue.
-                * don't allow any other map type to be passed into
-                * the special func;
-                */
-               if (bool_func && bool_map != bool_func) {
-                       verbose("cannot pass map_type %d into func %d\n",
-                               map->map_type, func_id);
-                       return -EINVAL;
-               }
+       /* We need a two-way check: first from the map's perspective ... */
+       switch (map->map_type) {
+       case BPF_MAP_TYPE_PROG_ARRAY:
+               if (func_id != BPF_FUNC_tail_call)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+               if (func_id != BPF_FUNC_perf_event_read &&
+                   func_id != BPF_FUNC_perf_event_output)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_STACK_TRACE:
+               if (func_id != BPF_FUNC_get_stackid)
+                       goto error;
+               break;
+       default:
+               break;
+       }
+
+       /* ... and second from the function itself. */
+       switch (func_id) {
+       case BPF_FUNC_tail_call:
+               if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_perf_event_read:
+       case BPF_FUNC_perf_event_output:
+               if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_get_stackid:
+               if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+                       goto error;
+               break;
+       default:
+               break;
        }
 
        return 0;
+error:
+       verbose("cannot pass map_type %d into func %d\n",
+               map->map_type, func_id);
+       return -EINVAL;
 }
 
 static int check_call(struct verifier_env *env, int func_id)
@@ -2049,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                return -E2BIG;
                        }
 
-                       /* remember this map */
-                       env->used_maps[env->used_map_cnt++] = map;
-
                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
-                       bpf_map_inc(map, false);
+                       map = bpf_map_inc(map, false);
+                       if (IS_ERR(map)) {
+                               fdput(f);
+                               return PTR_ERR(map);
+                       }
+                       env->used_maps[env->used_map_cnt++] = map;
+
                        fdput(f);
 next_insn:
                        insn++;
index 414d9c16da4210183af3b30ca20176f6f8397c43..5e59b832ae2b4b7447de17c105da19890763826d 100644 (file)
@@ -24,3 +24,4 @@ obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
index 8b489fcac37bd9d829439feb08d8bac1354c7e71..d1f7149f870439d65b9cfcfbc27d4160bbb1672f 100644 (file)
@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there is more than one RR task, we need the tick to enforce the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there are no RR tasks but there are FIFO tasks, we can skip the
+        * tick: there is no forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL, RR, or FIFO tasks, only CFS tasks can be left;
+        * if there is more than one we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
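
A worked walk-through of the reordered checks, assuming the earlier DL tests in the function did not already return:

/*
 * runqueue contents               -> sched_can_stop_tick()
 *  2+ RR tasks                    -> false (tick rotates the RR slice)
 *  exactly 1 RR task              -> true  (nothing at RR priority to
 *                                           round-robin with)
 *  no RR, 1+ FIFO tasks           -> true  (FIFO is never tick-preempted)
 *  no RT, 2+ tasks (e.g. CFS)     -> false (involuntary preemption needed)
 *  a single runnable task         -> true
 */
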
index 928c4ba32f683022be6388c99b4df418e9db3ca8..1141954e73b46cf1104e179e6ec1a7ace530d87e 100644 (file)
 DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
- * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
+ * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
  * @cpu: The CPU to set the pointer for.
  * @data: New pointer value.
+ * @func: Callback function to set for the CPU.
  *
- * Set and publish the update_util_data pointer for the given CPU.  That pointer
- * points to a struct update_util_data object containing a callback function
- * to call from cpufreq_update_util().  That function will be called from an RCU
- * read-side critical section, so it must not sleep.
+ * Set and publish the update_util_data pointer for the given CPU.
  *
- * Callers must use RCU-sched callbacks to free any memory that might be
- * accessed via the old update_util_data pointer or invoke synchronize_sched()
- * right after this function to avoid use-after-free.
+ * The update_util_data pointer of @cpu is set to @data and the callback
+ * function pointer in the target struct update_util_data is set to @func.
+ * That function will be called by cpufreq_update_util() from RCU-sched
+ * read-side critical sections, so it must not sleep.  @data will always be
+ * passed to it as the first argument, which allows the function to get to the
+ * target update_util_data structure and its container.
+ *
+ * The update_util_data pointer of @cpu must be NULL when this function is
+ * called or it will WARN() and return with no effect.
  */
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+                                    unsigned long util, unsigned long max))
 {
-       if (WARN_ON(data && !data->func))
+       if (WARN_ON(!data || !func))
                return;
 
+       if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+               return;
+
+       data->func = func;
        rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
 }
-EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
+EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
+
+/**
+ * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
+ * @cpu: The CPU to clear the pointer for.
+ *
+ * Clear the update_util_data pointer for the given CPU.
+ *
+ * Callers must use RCU-sched callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * right after this function to avoid use-after-free.
+ */
+void cpufreq_remove_update_util_hook(int cpu)
+{
+       rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
+}
+EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
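
A minimal sketch of the new registration flow from a governor's point of view (the my_gov_* names are made up; the shape mirrors the schedutil code added below):

#include <linux/cpufreq.h>
#include <linux/percpu.h>

struct my_gov_cpu {
	struct update_util_data update_util;
	/* ... per-CPU governor state ... */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu);

static void my_gov_update(struct update_util_data *hook, u64 time,
			  unsigned long util, unsigned long max)
{
	struct my_gov_cpu *c = container_of(hook, struct my_gov_cpu, update_util);

	/* Runs from scheduler context in an RCU-sched read-side critical
	 * section; must not sleep. */
}

static void my_gov_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_cpu, cpu).update_util,
				     my_gov_update);
}

static void my_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_sched();	/* wait out in-flight callbacks before freeing */
}
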
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
new file mode 100644 (file)
index 0000000..154ae3a
--- /dev/null
@@ -0,0 +1,530 @@
+/*
+ * CPUFreq governor based on scheduler-provided CPU utilization data.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <trace/events/power.h>
+
+#include "sched.h"
+
+struct sugov_tunables {
+       struct gov_attr_set attr_set;
+       unsigned int rate_limit_us;
+};
+
+struct sugov_policy {
+       struct cpufreq_policy *policy;
+
+       struct sugov_tunables *tunables;
+       struct list_head tunables_hook;
+
+       raw_spinlock_t update_lock;  /* For shared policies */
+       u64 last_freq_update_time;
+       s64 freq_update_delay_ns;
+       unsigned int next_freq;
+
+       /* The next fields are only needed if fast switch cannot be used. */
+       struct irq_work irq_work;
+       struct work_struct work;
+       struct mutex work_lock;
+       bool work_in_progress;
+
+       bool need_freq_update;
+};
+
+struct sugov_cpu {
+       struct update_util_data update_util;
+       struct sugov_policy *sg_policy;
+
+       /* The fields below are only needed when sharing a policy. */
+       unsigned long util;
+       unsigned long max;
+       u64 last_update;
+};
+
+static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+
+/************************ Governor internals ***********************/
+
+static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+{
+       s64 delta_ns;
+
+       if (sg_policy->work_in_progress)
+               return false;
+
+       if (unlikely(sg_policy->need_freq_update)) {
+               sg_policy->need_freq_update = false;
+               /*
+                * This happens when limits change, so forget the previous
+                * next_freq value and force an update.
+                */
+               sg_policy->next_freq = UINT_MAX;
+               return true;
+       }
+
+       delta_ns = time - sg_policy->last_freq_update_time;
+       return delta_ns >= sg_policy->freq_update_delay_ns;
+}
+
+static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+                               unsigned int next_freq)
+{
+       struct cpufreq_policy *policy = sg_policy->policy;
+
+       sg_policy->last_freq_update_time = time;
+
+       if (policy->fast_switch_enabled) {
+               if (sg_policy->next_freq == next_freq) {
+                       trace_cpu_frequency(policy->cur, smp_processor_id());
+                       return;
+               }
+               sg_policy->next_freq = next_freq;
+               next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+               if (next_freq == CPUFREQ_ENTRY_INVALID)
+                       return;
+
+               policy->cur = next_freq;
+               trace_cpu_frequency(next_freq, smp_processor_id());
+       } else if (sg_policy->next_freq != next_freq) {
+               sg_policy->next_freq = next_freq;
+               sg_policy->work_in_progress = true;
+               irq_work_queue(&sg_policy->irq_work);
+       }
+}
+
+/**
+ * get_next_freq - Compute a new frequency for a given cpufreq policy.
+ * @policy: cpufreq policy object to compute the new frequency for.
+ * @util: Current CPU utilization.
+ * @max: CPU capacity.
+ *
+ * If the utilization is frequency-invariant, choose the new frequency to be
+ * proportional to it, that is
+ *
+ * next_freq = C * max_freq * util / max
+ *
+ * Otherwise, approximate the would-be frequency-invariant utilization by
+ * util_raw * (curr_freq / max_freq) which leads to
+ *
+ * next_freq = C * curr_freq * util_raw / max
+ *
+ * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
+ */
+static unsigned int get_next_freq(struct cpufreq_policy *policy,
+                                 unsigned long util, unsigned long max)
+{
+       unsigned int freq = arch_scale_freq_invariant() ?
+                               policy->cpuinfo.max_freq : policy->cur;
+
+       return (freq + (freq >> 2)) * util / max;
+}
+
+static void sugov_update_single(struct update_util_data *hook, u64 time,
+                               unsigned long util, unsigned long max)
+{
+       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+       struct cpufreq_policy *policy = sg_policy->policy;
+       unsigned int next_f;
+
+       if (!sugov_should_update_freq(sg_policy, time))
+               return;
+
+       next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
+                       get_next_freq(policy, util, max);
+       sugov_update_commit(sg_policy, time, next_f);
+}
+
+static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
+                                          unsigned long util, unsigned long max)
+{
+       struct cpufreq_policy *policy = sg_policy->policy;
+       unsigned int max_f = policy->cpuinfo.max_freq;
+       u64 last_freq_update_time = sg_policy->last_freq_update_time;
+       unsigned int j;
+
+       if (util == ULONG_MAX)
+               return max_f;
+
+       for_each_cpu(j, policy->cpus) {
+               struct sugov_cpu *j_sg_cpu;
+               unsigned long j_util, j_max;
+               s64 delta_ns;
+
+               if (j == smp_processor_id())
+                       continue;
+
+               j_sg_cpu = &per_cpu(sugov_cpu, j);
+               /*
+                * If the CPU utilization was last updated more than a tick
+                * before the previous frequency update, don't take it into
+                * account: that CPU is probably idle now.
+                */
+               delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+               if (delta_ns > TICK_NSEC)
+                       continue;
+
+               j_util = j_sg_cpu->util;
+               if (j_util == ULONG_MAX)
+                       return max_f;
+
+               j_max = j_sg_cpu->max;
+               if (j_util * max > j_max * util) {
+                       util = j_util;
+                       max = j_max;
+               }
+       }
+
+       return get_next_freq(policy, util, max);
+}
+
+static void sugov_update_shared(struct update_util_data *hook, u64 time,
+                               unsigned long util, unsigned long max)
+{
+       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+       unsigned int next_f;
+
+       raw_spin_lock(&sg_policy->update_lock);
+
+       sg_cpu->util = util;
+       sg_cpu->max = max;
+       sg_cpu->last_update = time;
+
+       if (sugov_should_update_freq(sg_policy, time)) {
+               next_f = sugov_next_freq_shared(sg_policy, util, max);
+               sugov_update_commit(sg_policy, time, next_f);
+       }
+
+       raw_spin_unlock(&sg_policy->update_lock);
+}
+
+static void sugov_work(struct work_struct *work)
+{
+       struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+
+       mutex_lock(&sg_policy->work_lock);
+       __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
+                               CPUFREQ_RELATION_L);
+       mutex_unlock(&sg_policy->work_lock);
+
+       sg_policy->work_in_progress = false;
+}
+
+static void sugov_irq_work(struct irq_work *irq_work)
+{
+       struct sugov_policy *sg_policy;
+
+       sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
+       schedule_work_on(smp_processor_id(), &sg_policy->work);
+}
+
+/************************** sysfs interface ************************/
+
+static struct sugov_tunables *global_tunables;
+static DEFINE_MUTEX(global_tunables_lock);
+
+static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
+{
+       return container_of(attr_set, struct sugov_tunables, attr_set);
+}
+
+static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+{
+       struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+       return sprintf(buf, "%u\n", tunables->rate_limit_us);
+}
+
+static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
+                                  size_t count)
+{
+       struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+       struct sugov_policy *sg_policy;
+       unsigned int rate_limit_us;
+
+       if (kstrtouint(buf, 10, &rate_limit_us))
+               return -EINVAL;
+
+       tunables->rate_limit_us = rate_limit_us;
+
+       list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+               sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
+
+       return count;
+}
+
+static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+
+static struct attribute *sugov_attributes[] = {
+       &rate_limit_us.attr,
+       NULL
+};
+
+static struct kobj_type sugov_tunables_ktype = {
+       .default_attrs = sugov_attributes,
+       .sysfs_ops = &governor_sysfs_ops,
+};
+
+/********************** cpufreq governor interface *********************/
+
+static struct cpufreq_governor schedutil_gov;
+
+static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy;
+
+       sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
+       if (!sg_policy)
+               return NULL;
+
+       sg_policy->policy = policy;
+       init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+       INIT_WORK(&sg_policy->work, sugov_work);
+       mutex_init(&sg_policy->work_lock);
+       raw_spin_lock_init(&sg_policy->update_lock);
+       return sg_policy;
+}
+
+static void sugov_policy_free(struct sugov_policy *sg_policy)
+{
+       mutex_destroy(&sg_policy->work_lock);
+       kfree(sg_policy);
+}
+
+static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
+{
+       struct sugov_tunables *tunables;
+
+       tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+       if (tunables) {
+               gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
+               if (!have_governor_per_policy())
+                       global_tunables = tunables;
+       }
+       return tunables;
+}
+
+static void sugov_tunables_free(struct sugov_tunables *tunables)
+{
+       if (!have_governor_per_policy())
+               global_tunables = NULL;
+
+       kfree(tunables);
+}
+
+static int sugov_init(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy;
+       struct sugov_tunables *tunables;
+       unsigned int lat;
+       int ret = 0;
+
+       /* State should be equivalent to EXIT */
+       if (policy->governor_data)
+               return -EBUSY;
+
+       sg_policy = sugov_policy_alloc(policy);
+       if (!sg_policy)
+               return -ENOMEM;
+
+       mutex_lock(&global_tunables_lock);
+
+       if (global_tunables) {
+               if (WARN_ON(have_governor_per_policy())) {
+                       ret = -EINVAL;
+                       goto free_sg_policy;
+               }
+               policy->governor_data = sg_policy;
+               sg_policy->tunables = global_tunables;
+
+               gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
+               goto out;
+       }
+
+       tunables = sugov_tunables_alloc(sg_policy);
+       if (!tunables) {
+               ret = -ENOMEM;
+               goto free_sg_policy;
+       }
+
+       tunables->rate_limit_us = LATENCY_MULTIPLIER;
+       lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+       if (lat)
+               tunables->rate_limit_us *= lat;
+
+       policy->governor_data = sg_policy;
+       sg_policy->tunables = tunables;
+
+       ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
+                                  get_governor_parent_kobj(policy), "%s",
+                                  schedutil_gov.name);
+       if (ret)
+               goto fail;
+
+ out:
+       mutex_unlock(&global_tunables_lock);
+
+       cpufreq_enable_fast_switch(policy);
+       return 0;
+
+ fail:
+       policy->governor_data = NULL;
+       sugov_tunables_free(tunables);
+
+ free_sg_policy:
+       mutex_unlock(&global_tunables_lock);
+
+       sugov_policy_free(sg_policy);
+       pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret);
+       return ret;
+}
+
+static int sugov_exit(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy = policy->governor_data;
+       struct sugov_tunables *tunables = sg_policy->tunables;
+       unsigned int count;
+
+       cpufreq_disable_fast_switch(policy);
+
+       mutex_lock(&global_tunables_lock);
+
+       count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
+       policy->governor_data = NULL;
+       if (!count)
+               sugov_tunables_free(tunables);
+
+       mutex_unlock(&global_tunables_lock);
+
+       sugov_policy_free(sg_policy);
+       return 0;
+}
+
+static int sugov_start(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy = policy->governor_data;
+       unsigned int cpu;
+
+       sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
+       sg_policy->last_freq_update_time = 0;
+       sg_policy->next_freq = UINT_MAX;
+       sg_policy->work_in_progress = false;
+       sg_policy->need_freq_update = false;
+
+       for_each_cpu(cpu, policy->cpus) {
+               struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
+               sg_cpu->sg_policy = sg_policy;
+               if (policy_is_shared(policy)) {
+                       sg_cpu->util = ULONG_MAX;
+                       sg_cpu->max = 0;
+                       sg_cpu->last_update = 0;
+                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+                                                    sugov_update_shared);
+               } else {
+                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+                                                    sugov_update_single);
+               }
+       }
+       return 0;
+}
+
+static int sugov_stop(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy = policy->governor_data;
+       unsigned int cpu;
+
+       for_each_cpu(cpu, policy->cpus)
+               cpufreq_remove_update_util_hook(cpu);
+
+       synchronize_sched();
+
+       irq_work_sync(&sg_policy->irq_work);
+       cancel_work_sync(&sg_policy->work);
+       return 0;
+}
+
+static int sugov_limits(struct cpufreq_policy *policy)
+{
+       struct sugov_policy *sg_policy = policy->governor_data;
+
+       if (!policy->fast_switch_enabled) {
+               mutex_lock(&sg_policy->work_lock);
+
+               if (policy->max < policy->cur)
+                       __cpufreq_driver_target(policy, policy->max,
+                                               CPUFREQ_RELATION_H);
+               else if (policy->min > policy->cur)
+                       __cpufreq_driver_target(policy, policy->min,
+                                               CPUFREQ_RELATION_L);
+
+               mutex_unlock(&sg_policy->work_lock);
+       }
+
+       sg_policy->need_freq_update = true;
+       return 0;
+}
+
+int sugov_governor(struct cpufreq_policy *policy, unsigned int event)
+{
+       if (event == CPUFREQ_GOV_POLICY_INIT) {
+               return sugov_init(policy);
+       } else if (policy->governor_data) {
+               switch (event) {
+               case CPUFREQ_GOV_POLICY_EXIT:
+                       return sugov_exit(policy);
+               case CPUFREQ_GOV_START:
+                       return sugov_start(policy);
+               case CPUFREQ_GOV_STOP:
+                       return sugov_stop(policy);
+               case CPUFREQ_GOV_LIMITS:
+                       return sugov_limits(policy);
+               }
+       }
+       return -EINVAL;
+}
+
+static struct cpufreq_governor schedutil_gov = {
+       .name = "schedutil",
+       .governor = sugov_governor,
+       .owner = THIS_MODULE,
+};
+
+static int __init sugov_module_init(void)
+{
+       return cpufreq_register_governor(&schedutil_gov);
+}
+
+static void __exit sugov_module_exit(void)
+{
+       cpufreq_unregister_governor(&schedutil_gov);
+}
+
+MODULE_AUTHOR("Rafael J. Wysocki <rafael.j.wysocki@intel.com>");
+MODULE_DESCRIPTION("Utilization-based CPU frequency selection");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &schedutil_gov;
+}
+
+fs_initcall(sugov_module_init);
+#else
+module_init(sugov_module_init);
+#endif
+module_exit(sugov_module_exit);
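
Plugging numbers into get_next_freq(): (freq + (freq >> 2)) is the C = 1.25 factor from its kerneldoc. With assumed values — frequency-invariant utilization and cpuinfo.max_freq = 2000000 kHz, so freq + (freq >> 2) = 2500000:

/*
 * util = 512, max = 1024:  2500000 * 512 / 1024 = 1250000 kHz
 * util = 819, max = 1024:  2500000 * 819 / 1024 = 1999511 kHz
 *
 * i.e. at util/max ~= 0.8 the request already reaches max_freq --
 * the "tipping point" mentioned in the get_next_freq() comment.
 */
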
index ec2e8d23527e6c92a4fe1b5ef45dfb9ac1e242a8..921d6e5d33b74b7a417f7e61e006bb6ad3065ca2 100644 (file)
@@ -1842,6 +1842,14 @@ static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned lo
 static inline void cpufreq_trigger_update(u64 time) {}
 #endif /* CONFIG_CPU_FREQ */
 
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant()    (true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant()    (false)
+#endif
+
 static inline void account_reset_rq(struct rq *rq)
 {
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
index 81b87451c0ea145f93ba41e47dacc6a2acec8189..0c7dee221dca4747a30eca158d4c01a3deb1c4dc 100644 (file)
@@ -15,5 +15,6 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
 EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
 
index 05ddc0820771eb7bc456aab9e2f0e5167ce7f023..6f965864cc029ad47f63c67f6713cb79a519599e 100644 (file)
@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
        trace_create_file("filter", 0644, file->dir, file,
                          &ftrace_event_filter_fops);
 
-       trace_create_file("trigger", 0644, file->dir, file,
-                         &event_trigger_fops);
+       /*
+        * Only event directories that can be enabled should have
+        * triggers.
+        */
+       if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+               trace_create_file("trigger", 0644, file->dir, file,
+                                 &event_trigger_fops);
 
        trace_create_file("format", 0444, file->dir, call,
                          &ftrace_event_format_fops);
index 9e0b0315a724caf24ea195e275ed431639a262b4..53ad6c0831aebe6d3c9ec7cfc1387fa34ca7ea72 100644 (file)
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
 #define STACK_ALLOC_ALIGN 4
 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+               STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
 #define STACK_ALLOC_SLABS_CAP 1024
 #define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@ union handle_parts {
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
+               u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
 };
 
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+       stack->handle.valid = 1;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        depot_offset += required_size;
 
index ccf97b02b85f32d38f6f68a41bedf89b9a1ee596..8fa2540438015c1859724c606072a2770939d954 100644 (file)
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);
 
-               /*
-                * In case of fatal failure, release everything that might
-                * have been isolated in the previous iteration, and signal
-                * the failure back to caller.
-                */
-               if (!pfn) {
-                       putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
+               if (!pfn)
                        break;
-               }
 
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-       return pgdat->kcompactd_max_order > 0;
+       return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                INIT_LIST_HEAD(&cc.freepages);
                INIT_LIST_HEAD(&cc.migratepages);
 
+               if (kthread_should_stop())
+                       return;
                status = compact_zone(zone, &cc);
 
                if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
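
The compaction hunks above make kcompactd's sleep predicate also become true on kthread_should_stop(), so kthread_stop() cannot block forever on a sleeping kthread. A pthread analogue of the fixed predicate, assuming a condition variable stands in for the kthread wait queue:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool work_requested, should_stop;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        /* Mirrors kcompactd_work_requested(): wake on work OR stop. */
        while (!work_requested && !should_stop)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        pthread_mutex_lock(&lock);
        should_stop = true;            /* analogous to kthread_stop() */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);         /* returns promptly thanks to the OR */
        puts("worker exited cleanly");
        return 0;
    }
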
index df67b53ae3c53423b8f3065093a6e65f90dbafb7..f7daa7de8f4867dc871edb8ea3a1ec6189e4d54f 100644 (file)
@@ -3452,7 +3452,7 @@ next:
                }
        }
 
-       pr_info("%lu of %lu THP split", split, total);
+       pr_info("%lu of %lu THP split\n", split, total);
 
        return 0;
 }
@@ -3463,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void)
 {
        void *ret;
 
-       ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+       ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
                        &split_huge_pages_fops);
        if (!ret)
                pr_warn("Failed to create split_huge_pages in debugfs");
index 305537fc86406076ba07b3cdfeb6bf7affbc8ff6..52c218e2b724cdb8db1678c4070f31ab033daf65 100644 (file)
@@ -1222,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-                                               __func__, addr, end,
-                                               vma->vm_start,
-                                               vma->vm_end);
-                                       BUG();
-                               }
-#endif
+                               VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+                                   !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
                                split_huge_pmd(vma, pmd, addr);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
index 999792d35ccc0faee6c4f85d70b6e31876855284..bc5149d5ec38016da91a8b1c85aeca0193143f0c 100644 (file)
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
        if (gdtc->dirty > gdtc->bg_thresh)
                return true;
 
-       if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+       if (wb_stat(wb, WB_RECLAIMABLE) >
+           wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
                return true;
 
        if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
                if (mdtc->dirty > mdtc->bg_thresh)
                        return true;
 
-               if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+               if (wb_stat(wb, WB_RECLAIMABLE) >
+                   wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
                        return true;
        }
 
index 59de90d5d3a362c7293e624e748cd7b6ebab73b3..c1069efcc4d7477a5fc517303b67747f89b77074 100644 (file)
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
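
Switching init_per_zone_wmark_min() from module_init() to core_initcall() above only moves it to an earlier initcall level; the function body is unchanged. As a loose user-space analogue, GCC/Clang constructor priorities order initializers the same way (the priority numbers below are arbitrary; 0-100 are reserved by the toolchain):

    #include <stdio.h>

    __attribute__((constructor(101)))   /* "core_initcall": runs earlier */
    static void init_watermarks(void)
    {
        puts("watermarks initialized");
    }

    __attribute__((constructor(201)))   /* later level may rely on the above */
    static void init_consumer(void)
    {
        puts("consumer initialized");
    }

    int main(void)
    {
        return 0;                       /* both lines print before main */
    }
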
index 91dad80d068b75de629105ec0e688ce16335e16f..de0f119b1780b2af14d6bf6868f0a8da0f1ee236 100644 (file)
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
        struct zswap_pool *pool;
+       char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
                return NULL;
        }
 
-       pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+       /* unique name for each pool; specifically required by zsmalloc */
+       snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+       pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
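
The zswap hunks above generate a distinct zpool name per pool because zsmalloc keys its pools by name. A user-space sketch of the naming scheme, using C11 atomics in place of the kernel's atomic_inc_return(); make_pool_name() is a hypothetical helper, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pools_count;     /* counterpart of zswap_pools_count */

    static void make_pool_name(char *buf, size_t len)
    {
        /* atomic_fetch_add() returns the old value, so the +1 mirrors
         * the kernel's atomic_inc_return() semantics. */
        snprintf(buf, len, "zswap%x", atomic_fetch_add(&pools_count, 1) + 1);
    }

    int main(void)
    {
        char name[38];                 /* same size the patch reserves */
        make_pool_name(name, sizeof(name));
        printf("%s\n", name);          /* zswap1 */
        make_pool_name(name, sizeof(name));
        printf("%s\n", name);          /* zswap2 */
        return 0;
    }
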
index 3315b9a598af0f71a95a080d450cf1faf3a70a1d..4026f198a7345c91a34ef1c8aed6f750e7b27d3e 100644 (file)
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+       /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+        * set the interface as ACTIVE right away, without any risk of a
+        * race condition
+        */
+       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
        int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
        .name = "BATMAN_V",
+       .bat_iface_activate = batadv_v_iface_activate,
        .bat_iface_enable = batadv_v_iface_enable,
        .bat_iface_disable = batadv_v_iface_disable,
        .bat_iface_update_mac = batadv_v_iface_update_mac,
index e96d7c745b4a1de6444284f38207ed599925ac2e..3e6b2624f9809365ac29da7f88a3895af3df8728 100644 (file)
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+                            unsigned short vid)
 {
        int select;
        batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
                return NULL;
 
        dat.ip = ip_dst;
-       dat.vid = 0;
+       dat.vid = vid;
        ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
                                                    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
+ * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
  * This function copies the skb with pskb_copy() and sends it as a unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                                 struct sk_buff *skb, __be32 ip,
-                                int packet_subtype)
+                                unsigned short vid, int packet_subtype)
 {
        int i;
        bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
        struct sk_buff *tmp_skb;
        struct batadv_dat_candidate *cand;
 
-       cand = batadv_dat_select_candidates(bat_priv, ip);
+       cand = batadv_dat_select_candidates(bat_priv, ip, vid);
        if (!cand)
                goto out;
 
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                ret = true;
        } else {
                /* Send the request to the DHT */
-               ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+               ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
                                           BATADV_P_DAT_DHT_GET);
        }
 out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
         */
-       batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-       batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
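
The distributed ARP table hunks above fold the VLAN id into the DHT lookup key, so the same IP on two different VLANs now selects different candidate nodes. A sketch of why the vid has to be part of the hashed bytes, with FNV-1a as a stand-in for batadv_hash_dat():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t hash_key(uint32_t ip, uint16_t vid)
    {
        uint8_t bytes[6];
        uint32_t h = 2166136261u;      /* FNV-1a offset basis */

        memcpy(bytes, &ip, sizeof(ip));
        memcpy(bytes + 4, &vid, sizeof(vid));
        for (unsigned i = 0; i < sizeof(bytes); i++)
            h = (h ^ bytes[i]) * 16777619u;
        return h;
    }

    int main(void)
    {
        /* With vid hardcoded to 0 (the old behaviour), both calls
         * would collapse onto the same key. */
        printf("vid 1: %#x\n", hash_key(0x0a000001, 1));
        printf("vid 2: %#x\n", hash_key(0x0a000001, 2));
        return 0;
    }
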
index b22b2775a0a5ff706c17ea06c39c9bb092da1f9a..0a7deaf2670a981078d0c5c3de1805f69c265796 100644 (file)
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
        batadv_update_min_mtu(hard_iface->soft_iface);
 
+       if (bat_priv->bat_algo_ops->bat_iface_activate)
+               bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
 
-       if (hard_iface->if_status == BATADV_IF_ACTIVE)
-               batadv_hardif_deactivate_interface(hard_iface);
+       batadv_hardif_deactivate_interface(hard_iface);
 
        if (hard_iface->if_status != BATADV_IF_INACTIVE)
                goto out;
index e4cbb0753e37ff6681a20e8912df1f4ad1e74a05..c355a824713cd9224e2c705e222c2061cb504b5a 100644 (file)
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
 {
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
-       struct batadv_hardif_neigh_node *hardif_neigh;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
                batadv_neigh_ifinfo_put(neigh_ifinfo);
        }
 
-       hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-                                              neigh_node->addr);
-       if (hardif_neigh) {
-               /* batadv_hardif_neigh_get() increases refcount too */
-               batadv_hardif_neigh_put(hardif_neigh);
-               batadv_hardif_neigh_put(hardif_neigh);
-       }
+       batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
        if (bao->bat_neigh_free)
                bao->bat_neigh_free(neigh_node);
@@ -663,6 +656,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        ether_addr_copy(neigh_node->addr, neigh_addr);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;
+       neigh_node->last_seen = jiffies;
+
+       /* increment unique neighbor refcount */
+       kref_get(&hardif_neigh->refcount);
+       neigh_node->hardif_neigh = hardif_neigh;
 
        /* extra reference for return */
        kref_init(&neigh_node->refcount);
@@ -672,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
-       /* increment unique neighbor refcount */
-       kref_get(&hardif_neigh->refcount);
-
        batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
                   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
                   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
index 4dd646a52f1a16ca297a3cdb1abeff56206df166..b781bf75325061a5157c54bc976d1df1a522e0bd 100644 (file)
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
                neigh_node = NULL;
 
        spin_lock_bh(&orig_node->neigh_list_lock);
+       /* curr_router used earlier may not be the current orig_ifinfo->router
+        * anymore because it was dereferenced outside of the neigh_list_lock
+        * protected region. After the new best neighbor has replaced the
+        * current best neighbor, the reference counter needs to be decreased.
+        * Consequently, the code needs to ensure the curr_router variable points
+        * to the replaced best neighbor.
+        */
+       curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
        rcu_assign_pointer(orig_ifinfo->router, neigh_node);
        spin_unlock_bh(&orig_node->neigh_list_lock);
        batadv_orig_ifinfo_put(orig_ifinfo);
index 3ce06e0a91b1c125cdf5ff594db57529a2deaacf..76417850d3fcbb6ddf5d6f15c822cebdda4912da 100644 (file)
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->bcast_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->batman_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
index 0710379491bffc0627c35f0ed5ba6bbf535bb1bb..8a136b6a1ff06165b7e7c257fd8b00847fbf3065 100644 (file)
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
         */
        nf_reset(skb);
 
+       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+               goto dropped;
+
        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
+               if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+                       goto dropped;
+
                vhdr = (struct vlan_ethhdr *)skb->data;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        }
 
        /* skb->dev & skb->pkt_type are set here */
-       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-               goto dropped;
        skb->protocol = eth_type_trans(skb, soft_iface);
 
        /* should not be necessary anymore as we use skb_pull_rcsum()
index 0b43e86328a59bb42f7200380b840b0209fd26a1..9b4551a86535c6fb0264948cfa5aad63b8ee6dbb 100644 (file)
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
        tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
                                      common.refcount);
 
+       batadv_softif_vlan_put(tt_local_entry->vlan);
+
        kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        kref_get(&tt_local->common.refcount);
        tt_local->last_seen = jiffies;
        tt_local->common.added_at = tt_local->last_seen;
+       tt_local->vlan = vlan;
 
        /* the batman interface mac and multicast addresses should never be
         * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
-       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
        unsigned short vid;
        u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 1000;
 
                        no_purge = tt_common_entry->flags & np_flag;
-
-                       vlan = batadv_softif_vlan_get(bat_priv, vid);
-                       if (!vlan) {
-                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
-                                          BATADV_PRINT_VID(vid));
-                               continue;
-                       }
-
                        seq_printf(seq,
                                   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
                                   no_purge ? 0 : last_seen_secs,
                                   no_purge ? 0 : last_seen_msecs,
-                                  vlan->tt.crc);
-
-                       batadv_softif_vlan_put(vlan);
+                                  tt_local->vlan->tt.crc);
                }
                rcu_read_unlock();
        }
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       struct batadv_softif_vlan *vlan;
        void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
        /* extra call to free the local tt entry */
        batadv_tt_local_entry_put(tt_local_entry);
 
-       /* decrease the reference held for this vlan */
-       vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (!vlan)
-               goto out;
-
-       batadv_softif_vlan_put(vlan);
-       batadv_softif_vlan_put(vlan);
-
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv,
-                                                     tt_common_entry->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
index 9abfb3e73c3448a9612c9c282166fe5e18b44327..1e47fbe8bb7b2e9b3ec79c64ef508d7305b5d1a7 100644 (file)
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
        spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
        struct batadv_hard_iface *if_incoming;
        unsigned long last_seen;
+       struct batadv_hardif_neigh_node *hardif_neigh;
        struct kref refcount;
        struct rcu_head rcu;
 };
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
        struct batadv_tt_common_entry common;
        unsigned long last_seen;
+       struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
 struct batadv_algo_ops {
        struct hlist_node list;
        char *name;
+       void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
        int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
index 77a71cd68535fc02ae939168934fa4fb4f419644..5c925ac50b9572755e35be168cde8c858fd4ba95 100644 (file)
@@ -2802,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
-               features &= ~NETIF_F_CSUM_MASK;
+               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
index bc68eced0105716dca6f04abbdd491a1cfb9e908..0d9e9d7bb029373e37e4de6ed4003397d41f82cd 100644 (file)
@@ -470,6 +470,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                                                     const struct sock *sk2,
                                                     bool match_wildcard))
 {
+       struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
        struct sock *sk2;
        struct hlist_nulls_node *node;
        kuid_t uid = sock_i_uid(sk);
@@ -479,6 +480,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
                    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+                   inet_csk(sk2)->icsk_bind_hash == tb &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
                    saddr_same(sk, sk2, false))
                        return reuseport_add_sock(sk, sk2);
index af5d1f38217f4e4dcb977b6410d0d9a6a6c1e87c..205a2b8a5a84579c909a62fd4aafdd19fe64aa54 100644 (file)
@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
        return flags;
 }
 
+/* Fills in tpi and returns header length to be pulled. */
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                            bool *csum_err)
 {
@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                                return -EINVAL;
                }
        }
-       return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
+       return hdr_len;
 }
 
 static void ipgre_err(struct sk_buff *skb, u32 info,
@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
        struct tnl_ptk_info tpi;
        bool csum_err = false;
 
-       if (parse_gre_header(skb, &tpi, &csum_err)) {
+       if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
                if (!csum_err)          /* ignore csum errors. */
                        return;
        }
@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
 {
        struct tnl_ptk_info tpi;
        bool csum_err = false;
+       int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
        }
 #endif
 
-       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+       hdr_len = parse_gre_header(skb, &tpi, &csum_err);
+       if (hdr_len < 0)
+               goto drop;
+       if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
                goto drop;
 
        if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
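
With this refactor parse_gre_header() only reports how many bytes the header occupies and leaves the pull to the caller, which is what lets gre_err() validate a header it must not consume. A stripped-down sketch of the parse-then-pull split; the flag bit and the 4-byte sizes are illustrative, not the real GRE layout:

    #include <stdio.h>

    /* Returns header length on success, -1 on a malformed packet. */
    static int parse_header(const unsigned char *buf, int buflen)
    {
        int hdr_len = 4;

        if (buflen < 4)
            return -1;
        if (buf[0] & 0x80)             /* e.g. a "checksum present" flag */
            hdr_len += 4;
        return buflen < hdr_len ? -1 : hdr_len;
    }

    int main(void)
    {
        unsigned char pkt[] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 'x' };
        int hdr_len = parse_header(pkt, sizeof(pkt));

        if (hdr_len < 0)
            return 1;
        /* The caller "pulls" only after deciding to receive the packet,
         * as gre_rcv() now does with iptunnel_pull_header(). */
        printf("payload starts at offset %d: %c\n", hdr_len, pkt[hdr_len]);
        return 0;
    }
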
@@ -523,7 +528,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
        return ip_route_output_key(net, fl);
 }
 
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+                       __be16 proto)
 {
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
@@ -575,7 +581,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-       build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+       build_header(skb, tunnel_hlen, flags, proto,
                     tunnel_id_to_key(tun_info->key.tun_id), 0);
 
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
@@ -616,7 +622,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        const struct iphdr *tnl_params;
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, skb->protocol);
                return NETDEV_TX_OK;
        }
 
@@ -660,7 +666,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
                return NETDEV_TX_OK;
        }
 
@@ -893,7 +899,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
        netif_keep_dst(dev);
        dev->addr_len           = 4;
 
-       if (iph->daddr) {
+       if (iph->daddr && !tunnel->collect_md) {
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
@@ -902,8 +908,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
                        dev->header_ops = &ipgre_header_ops;
                }
 #endif
-       } else
+       } else if (!tunnel->collect_md) {
                dev->header_ops = &ipgre_header_ops;
+       }
 
        return ip_tunnel_init(dev);
 }
@@ -946,6 +953,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
        if (flags & (GRE_VERSION|GRE_ROUTING))
                return -EINVAL;
 
+       if (data[IFLA_GRE_COLLECT_METADATA] &&
+           data[IFLA_GRE_ENCAP_TYPE] &&
+           nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+               return -EINVAL;
+
        return 0;
 }
 
index 6aad0192443d49966785f0de67ee30934775dfca..a69ed94bda1b107634f0aacb6dbedb3d03cc87b0 100644 (file)
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
-                       dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
-                                         fl4.saddr);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
+
+               dst_cache_reset(&tunnel->dst_cache);
        }
 
        if (!tdev && tunnel->parms.link)
index 2ae3c4fd8aabc65a7206a73480b3dca4b975f7e3..41f18de5dcc2cda4686af5fe89c8ab4760966d91 100644 (file)
@@ -120,8 +120,7 @@ nla_put_failure:
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       /* No encapsulation overhead */
-       return 0;
+       return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
 }
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
index afca2eb4dfa777c75288dfb6fce9636b309a2ebc..6edfa99803148815e383eb13ce6a9c1eb098058d 100644 (file)
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
                               sizeof(udp_conf.peer_ip6));
                        udp_conf.use_udp6_tx_checksums =
-                           cfg->udp6_zero_tx_checksums;
+         !cfg->udp6_zero_tx_checksums;
                        udp_conf.use_udp6_rx_checksums =
-                           cfg->udp6_zero_rx_checksums;
+         !cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
index 453b4e7417804105cb136e2ec7f4c57a39392db3..e1cb22c16530377cc8ce0d9bf907bbd662f18337 100644 (file)
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = dev_alloc_name(ndev, ndev->name);
                if (ret < 0) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
 
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = register_netdevice(ndev);
                if (ret) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
        }
index 61ed2a8764ba4c7a66788d7843cef18abb9e904a..86187dad14403100ff6199ee79beed5262de818f 100644 (file)
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 
 /*
  * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set.  The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
        if (!tc)
                return -ENOMEM;
 
+       mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
index 64f873c0c6b6d15574ec9771506d04e617a6aaac..41c228300525c029df02472b609ada1a71cea98e 100644 (file)
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
 
        struct list_head        t_tcp_node;
        struct rds_connection   *conn;
+       /* t_conn_lock synchronizes the connection establishment between
+        * rds_tcp_accept_one and rds_tcp_conn_connect
+        */
+       struct mutex            t_conn_lock;
        struct socket           *t_sock;
        void                    *t_orig_write_space;
        void                    *t_orig_data_ready;
index 5cb16875c4603dba71c733de6600ada40e39cffc..49a3fcfed360edfb146416976c700b24e4520864 100644 (file)
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        struct socket *sock = NULL;
        struct sockaddr_in src, dest;
        int ret;
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+
+       mutex_lock(&tc->t_conn_lock);
 
+       if (rds_conn_up(conn)) {
+               mutex_unlock(&tc->t_conn_lock);
+               return 0;
+       }
        ret = sock_create_kern(rds_conn_net(conn), PF_INET,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        }
 
 out:
+       mutex_unlock(&tc->t_conn_lock);
        if (sock)
                sock_release(sock);
        return ret;
index 0936a4a32b476fdde5c7208fc465ec3324bbcf09..be263cdf268bae5b59a4746d4011d1042ede58cd 100644 (file)
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
-       struct rds_tcp_connection *rs_tcp;
+       struct rds_tcp_connection *rs_tcp = NULL;
+       int conn_state;
+       struct sock *nsk;
 
        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
         * rds_tcp_state_change() will do that cleanup
         */
        rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-       if (rs_tcp->t_sock &&
-           ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-               struct sock *nsk = new_sock->sk;
-
-               nsk->sk_user_data = NULL;
-               nsk->sk_prot->disconnect(nsk, 0);
-               tcp_done(nsk);
-               new_sock = NULL;
-               ret = 0;
-               goto out;
-       } else if (rs_tcp->t_sock) {
-               rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-               conn->c_outgoing = 0;
-       }
-
        rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+       mutex_lock(&rs_tcp->t_conn_lock);
+       conn_state = rds_conn_state(conn);
+       if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+               goto rst_nsk;
+       if (rs_tcp->t_sock) {
+               /* Need to resolve a duelling SYN between peers.
+                * We have an outstanding SYN to this peer, which may
+                * potentially have transitioned to the RDS_CONN_UP state,
+                * so we must quiesce any send threads before resetting
+                * c_transport_data.
+                */
+               wait_event(conn->c_waitq,
+                          !test_bit(RDS_IN_XMIT, &conn->c_flags));
+               if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+                       goto rst_nsk;
+               } else if (rs_tcp->t_sock) {
+                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       conn->c_outgoing = 0;
+               }
+       }
        rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn);
+       rds_connect_complete(conn); /* marks RDS_CONN_UP */
+       new_sock = NULL;
+       ret = 0;
+       goto out;
+rst_nsk:
+       /* reset the newly returned accept sock and bail */
+       nsk = new_sock->sk;
+       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+       nsk->sk_user_data = NULL;
+       nsk->sk_prot->disconnect(nsk, 0);
+       tcp_done(nsk);
        new_sock = NULL;
        ret = 0;
-
 out:
+       if (rs_tcp)
+               mutex_unlock(&rs_tcp->t_conn_lock);
        if (new_sock)
                sock_release(new_sock);
        return ret;
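
The accept path above now arbitrates a duelling SYN under t_conn_lock: if both peers connected to each other at once, the side with the numerically smaller address abandons the accepted socket and keeps its own outgoing one. A toy model of just the tie-break; the helper name and the addresses are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decide whether the accepting side keeps the newly accepted socket
     * when it already has an outgoing connection to the same peer. */
    static bool accept_keeps_new_sock(uint32_t local_addr, uint32_t peer_addr,
                                      bool have_outgoing_sock)
    {
        if (!have_outgoing_sock)
            return true;               /* no duel, accept normally */
        /* Same rule as the patch: the lower address yields its SYN. */
        return local_addr >= peer_addr;
    }

    int main(void)
    {
        printf("%d\n", accept_keeps_new_sock(0x0a000001, 0x0a000002, true));
        printf("%d\n", accept_keeps_new_sock(0x0a000002, 0x0a000001, true));
        return 0;
    }
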
index 9640bb39a5d293d55a96edc5164d369c1cded127..4befe97a90349832030e139125369c048cb1d67c 100644 (file)
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
        sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so
+ * when we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct sk_buff *segs;
+       netdev_features_t features = netif_skb_features(skb);
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs)) {
+               qdisc_reshape_fail(skb, sch);
+               return NULL;
+       }
+       consume_skb(skb);
+       return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
+       struct sk_buff *segs = NULL;
+       unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+       int nb = 0;
        int count = 1;
+       int rc = NET_XMIT_SUCCESS;
 
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         * do it now in software before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch);
+                       if (!segs)
+                               return NET_XMIT_DROP;
+               } else {
+                       segs = skb;
+               }
+
+               skb = segs;
+               segs = segs->next;
+
                if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
                    (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(skb)))
-                       return qdisc_drop(skb, sch);
+                    skb_checksum_help(skb))) {
+                       rc = qdisc_drop(skb, sch);
+                       goto finish_segs;
+               }
 
                skb->data[prandom_u32() % skb_headlen(skb)] ^=
                        1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->qstats.requeues++;
        }
 
+finish_segs:
+       if (segs) {
+               while (segs) {
+                       skb2 = segs->next;
+                       segs->next = NULL;
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       last_len = segs->len;
+                       rc = qdisc_enqueue(segs, sch);
+                       if (rc != NET_XMIT_SUCCESS) {
+                               if (net_xmit_drop_count(rc))
+                                       qdisc_qstats_drop(sch);
+                       } else {
+                               nb++;
+                               len += last_len;
+                       }
+                       segs = skb2;
+               }
+               sch->q.qlen += nb;
+               if (nb > 1)
+                       qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+       }
        return NET_XMIT_SUCCESS;
 }
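
finish_segs above re-enqueues each GSO segment on its own and afterwards corrects the qdisc's queue length and backlog by the difference (qdisc_tree_reduce_backlog with 1 - nb). A user-space sketch of the list walk; detaching each node before handing it on mirrors the segs->next = NULL step, since the enqueue target takes ownership:

    #include <stdio.h>
    #include <stdlib.h>

    struct seg {
        int len;
        struct seg *next;
    };

    static void enqueue(struct seg *s)
    {
        printf("enqueued len=%d\n", s->len);
        free(s);                       /* the queue now owns the buffer */
    }

    int main(void)
    {
        struct seg *head = NULL, **tail = &head;

        /* Build a fake three-segment chain, like skb_gso_segment() output. */
        for (int i = 1; i <= 3; i++) {
            struct seg *s = calloc(1, sizeof(*s));
            s->len = 100 * i;
            *tail = s;
            tail = &s->next;
        }

        for (struct seg *segs = head; segs; ) {
            struct seg *next = segs->next;  /* mirrors skb2 = segs->next */
            segs->next = NULL;              /* detach before enqueue */
            enqueue(segs);
            segs = next;
        }
        return 0;
    }
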
 
index ace178fd385038523bc498222e9497245ccae4b0..9aaa1bc566ae3ec59d03a1ef3c7872ab86a71c0d 100644 (file)
@@ -1444,6 +1444,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
        u16 bc_ack = msg_bcast_ack(hdr);
+       u32 self = tipc_own_addr(net);
        int rc = 0;
 
        __skb_queue_head_init(&xmitq);
@@ -1460,6 +1461,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
                        return tipc_node_bc_rcv(net, skb, bearer_id);
        }
 
+       /* Discard unicast link messages destined for another node */
+       if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+               goto discard;
+
        /* Locate neighboring node that sent packet */
        n = tipc_node_find(net, msg_prevnode(hdr));
        if (unlikely(!n))
index 8d8d1ec429eb6761b0d29b30299536ac782779c5..9b96f4fb8cea64f9b98c835b045bd8c840719433 100644 (file)
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
                u64 cookie;
        } data;
 
-       memset(&data, 0, sizeof(data));
        data.pid = bpf_get_current_pid_tgid();
        data.cookie = 0x12345678;
 
index 161dd0d67da8a84f0062672cd9125c491a2f58e3..a9155077feefb957d38dc29b4823a9529a7ee0e9 100644 (file)
@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
                do_usb_entry_multi(symval + i, mod);
 }
 
+static void do_of_entry_multi(void *symval, struct module *mod)
+{
+       char alias[500];
+       int len;
+       char *tmp;
+
+       DEF_FIELD_ADDR(symval, of_device_id, name);
+       DEF_FIELD_ADDR(symval, of_device_id, type);
+       DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
+       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+                     (*type)[0] ? *type : "*");
+
+       if (compatible[0])
+               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+                       *compatible);
+
+       /* Replace all whitespace with underscores */
+       for (tmp = alias; tmp && *tmp; tmp++)
+               if (isspace(*tmp))
+                       *tmp = '_';
+
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+       strcat(alias, "C");
+       add_wildcard(alias);
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+}
+
+static void do_of_table(void *symval, unsigned long size,
+                       struct module *mod)
+{
+       unsigned int i;
+       const unsigned long id_size = SIZE_of_device_id;
+
+       device_id_check(mod->name, "of", size, id_size, symval);
+
+       /* Leave last one: it's the terminator. */
+       size -= id_size;
+
+       for (i = 0; i < size; i += id_size)
+               do_of_entry_multi(symval + i, mod);
+}
+
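
do_of_entry_multi() above emits two aliases per OF table entry: the exact of:N*T*C* string and a second variant ending in "C*", so a device node whose compatible list carries extra entries still autoloads the module. A standalone sketch that reproduces the string building; emit_of_aliases() is a hypothetical wrapper around the same logic:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static void emit_of_aliases(const char *name, const char *type,
                                const char *compatible)
    {
        char alias[500];
        int len = sprintf(alias, "of:N%sT%s", name[0] ? name : "*",
                          type[0] ? type : "*");

        if (compatible[0])
            sprintf(&alias[len], "%sC%s", type[0] ? "*" : "", compatible);

        /* Replace all whitespace with underscores, as modpost does. */
        for (char *tmp = alias; *tmp; tmp++)
            if (isspace((unsigned char)*tmp))
                *tmp = '_';

        printf("MODULE_ALIAS(\"%s\");\n", alias);
        strcat(alias, "C");            /* then add_wildcard() appends '*' */
        printf("MODULE_ALIAS(\"%s*\");\n", alias);
    }

    int main(void)
    {
        /* Prints of:N*T*Cvendor,device and of:N*T*Cvendor,deviceC* */
        emit_of_aliases("", "", "vendor,device");
        return 0;
    }
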
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
                             void *symval, char *alias)
@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
 }
 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, void *symval, char *alias)
-{
-       int len;
-       char *tmp;
-       DEF_FIELD_ADDR(symval, of_device_id, name);
-       DEF_FIELD_ADDR(symval, of_device_id, type);
-       DEF_FIELD_ADDR(symval, of_device_id, compatible);
-
-       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-                     (*type)[0] ? *type : "*");
-
-       if (compatible[0])
-               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-                       *compatible);
-
-       /* Replace all whitespace with underscores */
-       for (tmp = alias; tmp && *tmp; tmp++)
-               if (isspace (*tmp))
-                       *tmp = '_';
-
-       return 1;
-}
-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-
 static int do_vio_entry(const char *filename, void *symval,
                char *alias)
 {
@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
        /* First handle the "special" cases */
        if (sym_is(name, namelen, "usb"))
                do_usb_table(symval, sym->st_size, mod);
+       if (sym_is(name, namelen, "of"))
+               do_of_table(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp"))
                do_pnp_device_entry(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp_card"))
index be09e2cacf828d32c75028ccf04f31e88657da70..3cd0a58672dd1df684e12d0a4331b56fd4fa12fb 100644 (file)
@@ -884,10 +884,10 @@ static char *func_tokens[] = {
        "BPRM_CHECK",
        "MODULE_CHECK",
        "FIRMWARE_CHECK",
+       "POST_SETATTR",
        "KEXEC_KERNEL_CHECK",
        "KEXEC_INITRAMFS_CHECK",
-       "POLICY_CHECK",
-       "POST_SETATTR"
+       "POLICY_CHECK"
 };
 
 void *ima_policy_start(struct seq_file *m, loff_t *pos)