]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge branches 'devel-stable', 'entry', 'fixes', 'mach-types', 'misc' and 'smp-hotplu...
authorRussell King <rmk+kernel@arm.linux.org.uk>
Thu, 2 May 2013 20:30:36 +0000 (21:30 +0100)
committerRussell King <rmk+kernel@arm.linux.org.uk>
Thu, 2 May 2013 20:30:36 +0000 (21:30 +0100)
252 files changed:
Documentation/arm/cluster-pm-race-avoidance.txt [new file with mode: 0644]
Documentation/arm/vlocks.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Makefile
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/debug.S [new file with mode: 0644]
arch/arm/boot/compressed/misc.c
arch/arm/common/Makefile
arch/arm/common/mcpm_entry.c [new file with mode: 0644]
arch/arm/common/mcpm_head.S [new file with mode: 0644]
arch/arm/common/mcpm_platsmp.c [new file with mode: 0644]
arch/arm/common/vlock.S [new file with mode: 0644]
arch/arm/common/vlock.h [new file with mode: 0644]
arch/arm/include/asm/atomic.h
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/cp15.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/glue-cache.h
arch/arm/include/asm/glue-df.h
arch/arm/include/asm/hardware/iop3xx.h
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/kvm_vgic.h
arch/arm/include/asm/mach/pci.h
arch/arm/include/asm/mcpm.h [new file with mode: 0644]
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/debug/uncompress.h [new file with mode: 0644]
arch/arm/include/uapi/asm/kvm.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/bios32.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head-common.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/return_address.c
arch/arm/kernel/sched_clock.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp_scu.c
arch/arm/kernel/smp_tlb.c
arch/arm/kernel/tcm.c
arch/arm/kernel/tcm.h [deleted file]
arch/arm/kvm/Makefile
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/emulate.c
arch/arm/kvm/guest.c
arch/arm/kvm/handle_exit.c [new file with mode: 0644]
arch/arm/kvm/interrupts.S
arch/arm/kvm/mmio.c
arch/arm/kvm/mmu.c
arch/arm/kvm/vgic.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/alignment.c
arch/arm/mm/cache-feroceon-l2.c
arch/arm/mm/cache-v3.S [deleted file]
arch/arm/mm/cache-v4.S
arch/arm/mm/dma-mapping.c
arch/arm/mm/flush.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-arm740.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-syms.c
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7-2level.S
arch/arm/mm/proc-v7-3level.S
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/mm/tcm.h [new file with mode: 0644]
arch/arm/tools/mach-types
arch/avr32/include/asm/io.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/e500.h
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/kvm/e500mc.c
arch/s390/include/asm/io.h
arch/s390/include/asm/pgtable.h
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/cputime.h [deleted file]
arch/sparc/include/asm/emergency-restart.h [deleted file]
arch/sparc/include/asm/mutex.h [deleted file]
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/serial.h [deleted file]
arch/sparc/include/asm/smp_32.h
arch/sparc/include/asm/switch_to_64.h
arch/sparc/include/asm/tlbflush_64.h
arch/sparc/include/uapi/asm/Kbuild
arch/sparc/include/uapi/asm/types.h [deleted file]
arch/sparc/kernel/smp_64.c
arch/sparc/lib/bitext.c
arch/sparc/mm/iommu.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
arch/sparc/mm/ultra.S
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/include/asm/efi.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/microcode_core_early.c
arch/x86/kernel/setup.c
arch/x86/platform/efi/efi.c
block/blk-core.c
drivers/block/rbd.c
drivers/char/hpet.c
drivers/dma/at_hdmac.c
drivers/firmware/Kconfig
drivers/firmware/efivars.c
drivers/idle/intel_idle.c
drivers/input/tablet/wacom_wac.c
drivers/irqchip/irq-gic.c
drivers/md/dm.c
drivers/md/raid5.c
drivers/mtd/mtdchar.c
drivers/net/bonding/bond_main.c
drivers/net/can/mcp251x.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/tun.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/sbus/char/bbc_i2c.c
drivers/ssb/driver_chipcommon_pmu.c
drivers/video/fbmem.c
drivers/video/mmp/core.c
fs/binfmt_elf.c
fs/bio.c
fs/exec.c
fs/hfsplus/extents.c
fs/hugetlbfs/inode.c
fs/proc/array.c
include/asm-generic/pgtable.h
include/linux/blktrace_api.h
include/linux/efi.h
include/linux/kexec.h
include/linux/mm.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/sched.h
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/swiotlb.h
include/linux/ucs2_string.h [new file with mode: 0644]
include/net/addrconf.h
include/net/irda/irlmp.h
include/net/scm.h
include/trace/events/block.h
include/trace/events/sched.h
include/uapi/linux/fuse.h
kernel/events/core.c
kernel/hrtimer.c
kernel/kexec.c
kernel/kprobes.c
kernel/kthread.c
kernel/signal.c
kernel/smpboot.c
kernel/trace/blktrace.c
kernel/user_namespace.c
lib/Kconfig
lib/Makefile
lib/swiotlb.c
lib/ucs2_string.c [new file with mode: 0644]
mm/hugetlb.c
mm/memory.c
mm/mmap.c
mm/vmscan.c
net/802/mrp.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/routing.c
net/batman-adv/translation-table.c
net/batman-adv/vis.c
net/bridge/br_if.c
net/bridge/br_private.h
net/bridge/br_stp_if.c
net/core/dev.c
net/ipv4/esp4.c
net/ipv4/ip_fragment.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/reassembly.c
net/irda/iriap.c
net/irda/irlmp.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_nat_core.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sched/cls_fw.c
scripts/checkpatch.pl
sound/core/pcm_native.c
tools/power/x86/turbostat/turbostat.c

diff --git a/Documentation/arm/cluster-pm-race-avoidance.txt b/Documentation/arm/cluster-pm-race-avoidance.txt
new file mode 100644 (file)
index 0000000..750b6fc
--- /dev/null
@@ -0,0 +1,498 @@
+Cluster-wide Power-up/power-down race avoidance algorithm
+=========================================================
+
+This file documents the algorithm which is used to coordinate CPU and
+cluster setup and teardown operations and to manage hardware coherency
+controls safely.
+
+The section "Rationale" explains what the algorithm is for and why it is
+needed.  "Basic model" explains general concepts using a simplified view
+of the system.  The other sections explain the actual details of the
+algorithm in use.
+
+
+Rationale
+---------
+
+In a system containing multiple CPUs, it is desirable to have the
+ability to turn off individual CPUs when the system is idle, reducing
+power consumption and thermal dissipation.
+
+In a system containing multiple clusters of CPUs, it is also desirable
+to have the ability to turn off entire clusters.
+
+Turning entire clusters off and on is a risky business, because it
+involves performing potentially destructive operations affecting a group
+of independently running CPUs, while the OS continues to run.  This
+means that we need some coordination in order to ensure that critical
+cluster-level operations are only performed when it is truly safe to do
+so.
+
+Simple locking may not be sufficient to solve this problem, because
+mechanisms like Linux spinlocks may rely on coherency mechanisms which
+are not immediately enabled when a cluster powers up.  Since enabling or
+disabling those mechanisms may itself be a non-atomic operation (such as
+writing some hardware registers and invalidating large caches), other
+methods of coordination are required in order to guarantee safe
+power-down and power-up at the cluster level.
+
+The mechanism presented in this document describes a coherent memory
+based protocol for performing the needed coordination.  It aims to be as
+lightweight as possible, while providing the required safety properties.
+
+
+Basic model
+-----------
+
+Each cluster and CPU is assigned a state, as follows:
+
+       DOWN
+       COMING_UP
+       UP
+       GOING_DOWN
+
+           +---------> UP ----------+
+           |                        v
+
+       COMING_UP                GOING_DOWN
+
+           ^                        |
+           +--------- DOWN <--------+
+
+
+DOWN:  The CPU or cluster is not coherent, and is either powered off or
+       suspended, or is ready to be powered off or suspended.
+
+COMING_UP: The CPU or cluster has committed to moving to the UP state.
+       It may be part way through the process of initialisation and
+       enabling coherency.
+
+UP:    The CPU or cluster is active and coherent at the hardware
+       level.  A CPU in this state is not necessarily being used
+       actively by the kernel.
+
+GOING_DOWN: The CPU or cluster has committed to moving to the DOWN
+       state.  It may be part way through the process of teardown and
+       coherency exit.
+
+
+Each CPU has one of these states assigned to it at any point in time.
+The CPU states are described in the "CPU state" section, below.
+
+Each cluster is also assigned a state, but it is necessary to split the
+state value into two parts (the "cluster" state and "inbound" state) and
+to introduce additional states in order to avoid races between different
+CPUs in the cluster simultaneously modifying the state.  The cluster-
+level states are described in the "Cluster state" section.
+
+To help distinguish the CPU states from cluster states in this
+discussion, the state names are given a CPU_ prefix for the CPU states,
+and a CLUSTER_ or INBOUND_ prefix for the cluster states.
+
+
+CPU state
+---------
+
+In this algorithm, each individual core in a multi-core processor is
+referred to as a "CPU".  CPUs are assumed to be single-threaded:
+therefore, a CPU can only be doing one thing at a single point in time.
+
+This means that CPUs fit the basic model closely.
+
+The algorithm defines the following states for each CPU in the system:
+
+       CPU_DOWN
+       CPU_COMING_UP
+       CPU_UP
+       CPU_GOING_DOWN
+
+        cluster setup and
+       CPU setup complete          policy decision
+             +-----------> CPU_UP ------------+
+             |                                v
+
+       CPU_COMING_UP                   CPU_GOING_DOWN
+
+             ^                                |
+             +----------- CPU_DOWN <----------+
+        policy decision           CPU teardown complete
+       or hardware event
+
+
+The definitions of the four states correspond closely to the states of
+the basic model.
+
+Transitions between states occur as follows.
+
+A trigger event (spontaneous) means that the CPU can transition to the
+next state as a result of making local progress only, with no
+requirement for any external event to happen.
+
+
+CPU_DOWN:
+
+       A CPU reaches the CPU_DOWN state when it is ready for
+       power-down.  On reaching this state, the CPU will typically
+       power itself down or suspend itself, via a WFI instruction or a
+       firmware call.
+
+       Next state:     CPU_COMING_UP
+       Conditions:     none
+
+       Trigger events:
+
+               a) an explicit hardware power-up operation, resulting
+                  from a policy decision on another CPU;
+
+               b) a hardware event, such as an interrupt.
+
+
+CPU_COMING_UP:
+
+       A CPU cannot start participating in hardware coherency until the
+       cluster is set up and coherent.  If the cluster is not ready,
+       then the CPU will wait in the CPU_COMING_UP state until the
+       cluster has been set up.
+
+       Next state:     CPU_UP
+       Conditions:     The CPU's parent cluster must be in CLUSTER_UP.
+       Trigger events: Transition of the parent cluster to CLUSTER_UP.
+
+       Refer to the "Cluster state" section for a description of the
+       CLUSTER_UP state.
+
+
+CPU_UP:
+       When a CPU reaches the CPU_UP state, it is safe for the CPU to
+       start participating in local coherency.
+
+       This is done by jumping to the kernel's CPU resume code.
+
+       Note that the definition of this state is slightly different
+       from the basic model definition: CPU_UP does not mean that the
+       CPU is coherent yet, but it does mean that it is safe to resume
+       the kernel.  The kernel handles the rest of the resume
+       procedure, so the remaining steps are not visible as part of the
+       race avoidance algorithm.
+
+       The CPU remains in this state until an explicit policy decision
+       is made to shut down or suspend the CPU.
+
+       Next state:     CPU_GOING_DOWN
+       Conditions:     none
+       Trigger events: explicit policy decision
+
+
+CPU_GOING_DOWN:
+
+       While in this state, the CPU exits coherency, including any
+       operations required to achieve this (such as cleaning data
+       caches).
+
+       Next state:     CPU_DOWN
+       Conditions:     local CPU teardown complete
+       Trigger events: (spontaneous)
+
+
+Cluster state
+-------------
+
+A cluster is a group of connected CPUs with some common resources.
+Because a cluster contains multiple CPUs, it can be doing multiple
+things at the same time.  This has some implications.  In particular, a
+CPU can start up while another CPU is tearing the cluster down.
+
+In this discussion, the "outbound side" is the view of the cluster state
+as seen by a CPU tearing the cluster down.  The "inbound side" is the
+view of the cluster state as seen by a CPU setting the cluster up.
+
+In order to enable safe coordination in such situations, it is important
+that a CPU which is setting up the cluster can advertise its state
+independently of the CPU which is tearing down the cluster.  For this
+reason, the cluster state is split into two parts:
+
+       "cluster" state: The global state of the cluster; or the state
+               on the outbound side:
+
+               CLUSTER_DOWN
+               CLUSTER_UP
+               CLUSTER_GOING_DOWN
+
+       "inbound" state: The state of the cluster on the inbound side.
+
+               INBOUND_NOT_COMING_UP
+               INBOUND_COMING_UP
+
+
+       The different pairings of these states result in six possible
+       states for the cluster as a whole:
+
+                                   CLUSTER_UP
+                 +==========> INBOUND_NOT_COMING_UP -------------+
+                 #                                               |
+                                                                 |
+            CLUSTER_UP     <----+                                |
+         INBOUND_COMING_UP      |                                v
+
+                 ^             CLUSTER_GOING_DOWN       CLUSTER_GOING_DOWN
+                 #              INBOUND_COMING_UP <=== INBOUND_NOT_COMING_UP
+
+           CLUSTER_DOWN         |                                |
+         INBOUND_COMING_UP <----+                                |
+                                                                 |
+                 ^                                               |
+                 +===========     CLUSTER_DOWN      <------------+
+                              INBOUND_NOT_COMING_UP
+
+       Transitions -----> can only be made by the outbound CPU, and
+       only involve changes to the "cluster" state.
+
+       Transitions ===##> can only be made by the inbound CPU, and only
+       involve changes to the "inbound" state, except where there is no
+       further transition possible on the outbound side (i.e., the
+       outbound CPU has put the cluster into the CLUSTER_DOWN state).
+
+       The race avoidance algorithm does not provide a way to determine
+       which exact CPUs within the cluster play these roles.  This must
+       be decided in advance by some other means.  Refer to the section
+       "Last man and first man selection" for more explanation.
+
+
+       CLUSTER_DOWN/INBOUND_NOT_COMING_UP is the only state where the
+       cluster can actually be powered down.
+
+       The parallelism of the inbound and outbound CPUs is observed by
+       the existence of two different paths from CLUSTER_GOING_DOWN/
+       INBOUND_NOT_COMING_UP (corresponding to GOING_DOWN in the basic
+       model) to CLUSTER_DOWN/INBOUND_COMING_UP (corresponding to
+       COMING_UP in the basic model).  The second path avoids cluster
+       teardown completely.
+
+       CLUSTER_UP/INBOUND_COMING_UP is equivalent to UP in the basic
+       model.  The final transition to CLUSTER_UP/INBOUND_NOT_COMING_UP
+       is trivial and merely resets the state machine ready for the
+       next cycle.
+
+       Details of the allowable transitions follow.
+
+       The next state in each case is notated
+
+               <cluster state>/<inbound state> (<transitioner>)
+
+       where the <transitioner> is the side on which the transition
+       can occur; either the inbound or the outbound side.
+
+
+CLUSTER_DOWN/INBOUND_NOT_COMING_UP:
+
+       Next state:     CLUSTER_DOWN/INBOUND_COMING_UP (inbound)
+       Conditions:     none
+       Trigger events:
+
+               a) an explicit hardware power-up operation, resulting
+                  from a policy decision on another CPU;
+
+               b) a hardware event, such as an interrupt.
+
+
+CLUSTER_DOWN/INBOUND_COMING_UP:
+
+       In this state, an inbound CPU sets up the cluster, including
+       enabling of hardware coherency at the cluster level and any
+       other operations (such as cache invalidation) which are required
+       in order to achieve this.
+
+       The purpose of this state is to do sufficient cluster-level
+       setup to enable other CPUs in the cluster to enter coherency
+       safely.
+
+       Next state:     CLUSTER_UP/INBOUND_COMING_UP (inbound)
+       Conditions:     cluster-level setup and hardware coherency complete
+       Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_COMING_UP:
+
+       Cluster-level setup is complete and hardware coherency is
+       enabled for the cluster.  Other CPUs in the cluster can safely
+       enter coherency.
+
+       This is a transient state, leading immediately to
+       CLUSTER_UP/INBOUND_NOT_COMING_UP.  All other CPUs on the cluster
+       should treat these two states as equivalent.
+
+       Next state:     CLUSTER_UP/INBOUND_NOT_COMING_UP (inbound)
+       Conditions:     none
+       Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_NOT_COMING_UP:
+
+       Cluster-level setup is complete and hardware coherency is
+       enabled for the cluster.  Other CPUs in the cluster can safely
+       enter coherency.
+
+       The cluster will remain in this state until a policy decision is
+       made to power the cluster down.
+
+       Next state:     CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP (outbound)
+       Conditions:     none
+       Trigger events: policy decision to power down the cluster
+
+
+CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP:
+
+       An outbound CPU is tearing the cluster down.  The selected CPU
+       must wait in this state until all CPUs in the cluster are in the
+       CPU_DOWN state.
+
+       When all CPUs are in the CPU_DOWN state, the cluster can be torn
+       down, for example by cleaning data caches and exiting
+       cluster-level coherency.
+
+       To avoid wasteful unnecessary teardown operations, the outbound
+       CPU should check the inbound cluster state for asynchronous
+       transitions to INBOUND_COMING_UP.  Alternatively, individual
+       CPUs can be checked for entry into CPU_COMING_UP or CPU_UP.
+
+
+       Next states:
+
+       CLUSTER_DOWN/INBOUND_NOT_COMING_UP (outbound)
+               Conditions:     cluster torn down and ready to power off
+               Trigger events: (spontaneous)
+
+       CLUSTER_GOING_DOWN/INBOUND_COMING_UP (inbound)
+               Conditions:     none
+               Trigger events:
+
+                       a) an explicit hardware power-up operation,
+                          resulting from a policy decision on another
+                          CPU;
+
+                       b) a hardware event, such as an interrupt.
+
+
+CLUSTER_GOING_DOWN/INBOUND_COMING_UP:
+
+       The cluster is (or was) being torn down, but another CPU has
+       come online in the meantime and is trying to set up the cluster
+       again.
+
+       If the outbound CPU observes this state, it has two choices:
+
+               a) back out of teardown, restoring the cluster to the
+                  CLUSTER_UP state;
+
+               b) finish tearing the cluster down and put the cluster
+                  in the CLUSTER_DOWN state; the inbound CPU will
+                  set up the cluster again from there.
+
+       Choice (a) permits the removal of some latency by avoiding
+       unnecessary teardown and setup operations in situations where
+       the cluster is not really going to be powered down.
+
+
+       Next states:
+
+       CLUSTER_UP/INBOUND_COMING_UP (outbound)
+               Conditions:     cluster-level setup and hardware
+                               coherency complete
+               Trigger events: (spontaneous)
+
+       CLUSTER_DOWN/INBOUND_COMING_UP (outbound)
+               Conditions:     cluster torn down and ready to power off
+               Trigger events: (spontaneous)
+
+
+Last man and First man selection
+--------------------------------
+
+The CPU which performs cluster tear-down operations on the outbound side
+is commonly referred to as the "last man".
+
+The CPU which performs cluster setup on the inbound side is commonly
+referred to as the "first man".
+
+The race avoidance algorithm documented above does not provide a
+mechanism to choose which CPUs should play these roles.
+
+
+Last man:
+
+When shutting down the cluster, all the CPUs involved are initially
+executing Linux and hence coherent.  Therefore, ordinary spinlocks can
+be used to select a last man safely, before the CPUs become
+non-coherent.
+
+
+First man:
+
+Because CPUs may power up asynchronously in response to external wake-up
+events, a dynamic mechanism is needed to make sure that only one CPU
+attempts to play the first man role and do the cluster-level
+initialisation: any other CPUs must wait for this to complete before
+proceeding.
+
+Cluster-level initialisation may involve actions such as configuring
+coherency controls in the bus fabric.
+
+The current implementation in mcpm_head.S uses a separate mutual exclusion
+mechanism to do this arbitration.  This mechanism is documented in
+detail in vlocks.txt.
+
+
+Features and Limitations
+------------------------
+
+Implementation:
+
+       The current ARM-based implementation is split between
+       arch/arm/common/mcpm_head.S (low-level inbound CPU operations) and
+       arch/arm/common/mcpm_entry.c (everything else):
+
+       __mcpm_cpu_going_down() signals the transition of a CPU to the
+               CPU_GOING_DOWN state.
+
+       __mcpm_cpu_down() signals the transition of a CPU to the CPU_DOWN
+               state.
+
+       A CPU transitions to CPU_COMING_UP and then to CPU_UP via the
+               low-level power-up code in mcpm_head.S.  This could
+               involve CPU-specific setup code, but in the current
+               implementation it does not.
+
+       __mcpm_outbound_enter_critical() and __mcpm_outbound_leave_critical()
+               handle transitions from CLUSTER_UP to CLUSTER_GOING_DOWN
+               and from there to CLUSTER_DOWN or back to CLUSTER_UP (in
+               the case of an aborted cluster power-down).
+
+               These functions are more complex than the __mcpm_cpu_*()
+               functions due to the extra inter-CPU coordination which
+               is needed for safe transitions at the cluster level.
+
+       A cluster transitions from CLUSTER_DOWN back to CLUSTER_UP via
+               the low-level power-up code in mcpm_head.S.  This
+               typically involves platform-specific setup code,
+               provided by the platform-specific power_up_setup
+               function registered via mcpm_sync_init.
+
+Deep topologies:
+
+       As currently described and implemented, the algorithm does not
+       support CPU topologies involving more than two levels (i.e.,
+       clusters of clusters are not supported).  The algorithm could be
+       extended by replicating the cluster-level states for the
+       additional topological levels, and modifying the transition
+       rules for the intermediate (non-outermost) cluster levels.
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, in
+collaboration with Nicolas Pitre and Achin Gupta.
+
+Copyright (C) 2012-2013  Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
diff --git a/Documentation/arm/vlocks.txt b/Documentation/arm/vlocks.txt
new file mode 100644 (file)
index 0000000..415960a
--- /dev/null
@@ -0,0 +1,211 @@
+vlocks for Bare-Metal Mutual Exclusion
+======================================
+
+Voting Locks, or "vlocks" provide a simple low-level mutual exclusion
+mechanism, with reasonable but minimal requirements on the memory
+system.
+
+These are intended to be used to coordinate critical activity among CPUs
+which are otherwise non-coherent, in situations where the hardware
+provides no other mechanism to support this and ordinary spinlocks
+cannot be used.
+
+
+vlocks make use of the atomicity provided by the memory system for
+writes to a single memory location.  To arbitrate, every CPU "votes for
+itself", by storing a unique number to a common memory location.  The
+final value seen in that memory location when all the votes have been
+cast identifies the winner.
+
+In order to make sure that the election produces an unambiguous result
+in finite time, a CPU will only enter the election in the first place if
+no winner has been chosen and the election does not appear to have
+started yet.
+
+
+Algorithm
+---------
+
+The easiest way to explain the vlocks algorithm is with some pseudo-code:
+
+
+       int currently_voting[NR_CPUS] = { 0, };
+       int last_vote = -1; /* no votes yet */
+
+       bool vlock_trylock(int this_cpu)
+       {
+               /* signal our desire to vote */
+               currently_voting[this_cpu] = 1;
+               if (last_vote != -1) {
+                       /* someone already volunteered himself */
+                       currently_voting[this_cpu] = 0;
+                       return false; /* not ourself */
+               }
+
+               /* let's suggest ourself */
+               last_vote = this_cpu;
+               currently_voting[this_cpu] = 0;
+
+               /* then wait until everyone else is done voting */
+               for_each_cpu(i) {
+                       while (currently_voting[i] != 0)
+                               /* wait */;
+               }
+
+               /* result */
+               if (last_vote == this_cpu)
+                       return true; /* we won */
+               return false;
+       }
+
+       void vlock_unlock(void)
+       {
+               last_vote = -1;
+       }
+
+
+The currently_voting[] array provides a way for the CPUs to determine
+whether an election is in progress, and plays a role analogous to the
+"entering" array in Lamport's bakery algorithm [1].
+
+However, once the election has started, the underlying memory system
+atomicity is used to pick the winner.  This avoids the need for a static
+priority rule to act as a tie-breaker, or any counters which could
+overflow.
+
+As long as the last_vote variable is globally visible to all CPUs, it
+will contain only one value that won't change once every CPU has cleared
+its currently_voting flag.
+
+
+Features and limitations
+------------------------
+
+ * vlocks are not intended to be fair.  In the contended case, it is the
+   _last_ CPU which attempts to get the lock which will be most likely
+   to win.
+
+   vlocks are therefore best suited to situations where it is necessary
+   to pick a unique winner, but it does not matter which CPU actually
+   wins.
+
+ * Like other similar mechanisms, vlocks will not scale well to a large
+   number of CPUs.
+
+   vlocks can be cascaded in a voting hierarchy to permit better scaling
+   if necessary, as in the following hypothetical example for 4096 CPUs:
+
+       /* first level: local election */
+       my_town = towns[(this_cpu >> 4) & 0xf];
+       I_won = vlock_trylock(my_town, this_cpu & 0xf);
+       if (I_won) {
+               /* we won the town election, let's go for the state */
+               my_state = states[(this_cpu >> 8) & 0xf];
+               I_won = vlock_trylock(my_state, this_cpu & 0xf);
+               if (I_won) {
+                       /* and so on */
+                       I_won = vlock_trylock(the_whole_country, this_cpu & 0xf);
+                       if (I_won) {
+                               /* ... */
+                       }
+                       vlock_unlock(the_whole_country);
+               }
+
+               vlock_unlock(my_state);
+       }
+       vlock_unlock(my_town);
+
+
+ARM implementation
+------------------
+
+The current ARM implementation [2] contains some optimisations beyond
+the basic algorithm:
+
+ * By packing the members of the currently_voting array close together,
+   we can read the whole array in one transaction (providing the number
+   of CPUs potentially contending the lock is small enough).  This
+   reduces the number of round-trips required to external memory.
+
+   In the ARM implementation, this means that we can use a single load
+   and comparison:
+
+       LDR     Rt, [Rn]
+       CMP     Rt, #0
+
+   ...in place of code equivalent to:
+
+       LDRB    Rt, [Rn]
+       CMP     Rt, #0
+       LDRBEQ  Rt, [Rn, #1]
+       CMPEQ   Rt, #0
+       LDRBEQ  Rt, [Rn, #2]
+       CMPEQ   Rt, #0
+       LDRBEQ  Rt, [Rn, #3]
+       CMPEQ   Rt, #0
+
+   This cuts down on the fast-path latency, as well as potentially
+   reducing bus contention in contended cases.
+
+   The optimisation relies on the fact that the ARM memory system
+   guarantees coherency between overlapping memory accesses of
+   different sizes, similarly to many other architectures.  Note that
+   we do not care which element of currently_voting appears in which
+   bits of Rt, so there is no need to worry about endianness in this
+   optimisation.
+
+   If there are too many CPUs to read the currently_voting array in
+   one transaction then multiple transactions are still required.  The
+   implementation uses a simple loop of word-sized loads for this
+   case.  The number of transactions is still fewer than would be
+   required if bytes were loaded individually.
+
+
+   In principle, we could aggregate further by using LDRD or LDM, but
+   to keep the code simple this was not attempted in the initial
+   implementation.
+
+
+ * vlocks are currently only used to coordinate between CPUs which are
+   unable to enable their caches yet.  This means that the
+   implementation removes many of the barriers which would be required
+   when executing the algorithm in cached memory.
+
+   packing of the currently_voting array does not work with cached
+   memory unless all CPUs contending the lock are cache-coherent, due
+   to cache writebacks from one CPU clobbering values written by other
+   CPUs.  (Though if all the CPUs are cache-coherent, you should
+   probably be using proper spinlocks instead anyway).
+
+
+ * The "no votes yet" value used for the last_vote variable is 0 (not
+   -1 as in the pseudocode).  This allows statically-allocated vlocks
+   to be implicitly initialised to an unlocked state simply by putting
+   them in .bss.
+
+   An offset is added to each CPU's ID for the purpose of setting this
+   variable, so that no CPU uses the value 0 for its ID.
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, for
+use in ARM-based big.LITTLE platforms, with review and input gratefully
+received from Nicolas Pitre and Achin Gupta.  Thanks to Nicolas for
+grabbing most of this text out of the relevant mail thread and writing
+up the pseudocode.
+
+Copyright (C) 2012-2013  Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
+
+
+References
+----------
+
+[1] Lamport, L. "A New Solution of Dijkstra's Concurrent Programming
+    Problem", Communications of the ACM 17, 8 (August 1974), 453-455.
+
+    http://en.wikipedia.org/wiki/Lamport%27s_bakery_algorithm
+
+[2] linux/arch/arm/common/vlock.S, www.kernel.org.
index 4609e81dbc37fc2dbfa005ff607890df3a8bbc6b..8ccbf27aead4def123c94eaf0de40e39db40199f 100644 (file)
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        is selected automatically. Check
                        Documentation/kdump/kdump.txt for further details.
 
-       crashkernel_low=size[KMG]
-                       [KNL, x86] parts under 4G.
-
        crashkernel=range1:size1[,range2:size2,...][@offset]
                        [KNL] Same as above, but depends on the memory
                        in the running system. The syntax of range is
@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        a memory unit (amount[KMG]). See also
                        Documentation/kdump/kdump.txt for an example.
 
+       crashkernel=size[KMG],high
+                       [KNL, x86_64] range could be above 4G. Allows the
+                       kernel to allocate a physical memory region from the
+                       top, so it could be above 4G if the system has more
+                       Otherwise memory region will be allocated below 4G, if
+                       available.
+                       It will be ignored if crashkernel=X is specified.
+       crashkernel=size[KMG],low
+                       [KNL, x86_64] range under 4G. When crashkernel=X,high
+                       is passed, the kernel could allocate the physical
+                       memory region above 4G, which causes the second kernel
+                       to crash on systems that require some amount of low
+                       memory, e.g. swiotlb requires at least 64M+32K low
+                       memory.  The kernel would try to allocate 72M below 4G
+                       automatically.  This one lets the user specify their
+                       own low range under 4G for the second kernel instead.
+                       0: to disable low allocation.
+                       It will be ignored when crashkernel=X,high is not used
+                       or memory reserved is below 4G.
+
        cs89x0_dma=     [HW,NET]
                        Format: <dma>
 
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        edd=            [EDD]
                        Format: {"off" | "on" | "skip[mbr]"}
 
+       efi_no_storage_paranoia [EFI; X86]
+                       Using this parameter you can use more than 50% of
+                       your efi variable storage. Use this parameter only if
+                       you are really sure that your UEFI does sane garbage
+                       collection and fulfills the spec; otherwise your board may brick.
+
        eisa_irq_edge=  [PARISC,HW]
                        See header of drivers/parisc/eisa.c.
 
index 9cf6783c2ec37a09f7eab3d771243e4219e616d5..46263d808876a957083da89b42dde63f51d2ef26 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
 # Carefully list dependencies so we do not try to build scripts twice
 # in parallel
 PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+        asm-generic
        $(Q)$(MAKE) $(build)=$(@)
 
 # Objects we will link into vmlinux / subdirs we need to visit
index 1cacda426a0ea6699528dd0eeedf83032825e09e..00bdfdbdd4a8d0f172139e56d352b4b0ee866d8a 100644 (file)
@@ -58,6 +58,7 @@ config ARM
        select CLONE_BACKWARDS
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
+       select HAVE_CONTEXT_TRACKING
        help
          The ARM series is a line of low-power-consumption RISC chip designs
          licensed by ARM Ltd and targeted at embedded applications and
@@ -1606,6 +1607,14 @@ config HAVE_ARM_TWD
        help
          This options enables support for the ARM timer and watchdog unit
 
+config MCPM
+       bool "Multi-Cluster Power Management"
+       depends on CPU_V7 && SMP
+       help
+         This option provides the common power management infrastructure
+         for (multi-)cluster based systems, such as big.LITTLE based
+         systems.
+
 choice
        prompt "Memory split"
        default VMSPLIT_3G
@@ -1693,8 +1702,9 @@ config SCHED_HRTICK
        def_bool HIGH_RES_TIMERS
 
 config THUMB2_KERNEL
-       bool "Compile the kernel in Thumb-2 mode"
+       bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
        depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+       default y if CPU_THUMBONLY
        select AEABI
        select ARM_ASM_UNIFIED
        select ARM_UNWIND
index 9b31f4311ea2717818d8a387d3e0131d335195d0..791fbeba40c6a32ae78992c5bab0e060f73c02c6 100644 (file)
@@ -602,6 +602,17 @@ config DEBUG_LL_INCLUDE
        default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
        default "mach/debug-macro.S"
 
+config DEBUG_UNCOMPRESS
+       bool
+       default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
+                    !DEBUG_OMAP2PLUS_UART && \
+                    !DEBUG_TEGRA_UART
+
+config UNCOMPRESS_INCLUDE
+       string
+       default "debug/uncompress.h" if ARCH_MULTIPLATFORM
+       default "mach/uncompress.h"
+
 config EARLY_PRINTK
        bool "Early printk"
        depends on DEBUG_LL
index afed28e37ea5776073c9bace41d644b4bd6dda44..3580d57ea21841285bc687d928f39268a325edc1 100644 (file)
@@ -24,6 +24,9 @@ endif
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
 HEAD   = head.o
 OBJS   += misc.o decompress.o
+ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
+OBJS   += debug.o
+endif
 FONTC  = $(srctree)/drivers/video/console/font_acorn_8x8.c
 
 # string library code (-Os is enforced to keep it much smaller)
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
new file mode 100644 (file)
index 0000000..6e8382d
--- /dev/null
@@ -0,0 +1,12 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include CONFIG_DEBUG_LL_INCLUDE
+
+ENTRY(putc)
+       addruart r1, r2, r3
+       waituart r3, r1
+       senduart r0, r1
+       busyuart r3, r1
+       mov      pc, lr
+ENDPROC(putc)
index df899834d84ed688a8e77e90e0b584789aba6243..31bd43b8209585d807d745309057013ba627b8a5 100644 (file)
@@ -25,13 +25,7 @@ unsigned int __machine_arch_type;
 static void putstr(const char *ptr);
 extern void error(char *x);
 
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-static inline void putc(int c) {}
-static inline void flush(void) {}
-static inline void arch_decomp_setup(void) {}
-#else
-#include <mach/uncompress.h>
-#endif
+#include CONFIG_UNCOMPRESS_INCLUDE
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
index dc8dd0de5c0f613fd45f1c4d72a4d01c9405af0d..53e68b1631968c80792f83beed9be87410534e03 100644 (file)
@@ -11,3 +11,6 @@ obj-$(CONFIG_SHARP_PARAM)     += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)      += scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
+obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+AFLAGS_mcpm_head.o             := -march=armv7-a
+AFLAGS_vlock.o                 := -march=armv7-a
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644 (file)
index 0000000..370236d
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
+ *
+ * Created by:  Nicolas Pitre, March 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
+#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+#include <asm/cputype.h>
+
+extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
+{
+       unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+       mcpm_entry_vectors[cluster][cpu] = val;
+       sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
+}
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+       if (platform_ops)
+               return -EBUSY;
+       platform_ops = ops;
+       return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+       if (!platform_ops)
+               return -EUNATCH; /* try not to shadow power_up errors */
+       might_sleep();
+       return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+       phys_reset_t phys_reset;
+
+       BUG_ON(!platform_ops);
+       BUG_ON(!irqs_disabled());
+
+       /*
+        * Do this before calling into the power_down method,
+        * as it might not always be safe to do afterwards.
+        */
+       setup_mm_for_reboot();
+
+       platform_ops->power_down();
+
+       /*
+        * It is possible for a power_up request to happen concurrently
+        * with a power_down request for the same CPU. In this case the
+        * power_down method might not be able to actually enter a
+        * powered down state with the WFI instruction if the power_up
+        * method has removed the required reset condition.  The
+        * power_down method is then allowed to return. We must perform
+        * a re-entry in the kernel as if the power_up method just had
+        * deasserted reset on the CPU.
+        *
+        * To simplify race issues, the platform specific implementation
+        * must accommodate for the possibility of unordered calls to
+        * power_down and power_up with a usage count. Therefore, if a
+        * call to power_up is issued for a CPU that is not down, then
+        * the next call to power_down must not attempt a full shutdown
+        * but only do the minimum (normally disabling L1 cache and CPU
+        * coherency) and return just as if a concurrent power_up request
+        * had happened as described above.
+        */
+
+       phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+       phys_reset(virt_to_phys(mcpm_entry_point));
+
+       /* should never get here */
+       BUG();
+}
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+       phys_reset_t phys_reset;
+
+       BUG_ON(!platform_ops);
+       BUG_ON(!irqs_disabled());
+
+       /* Very similar to mcpm_cpu_power_down() */
+       setup_mm_for_reboot();
+       platform_ops->suspend(expected_residency);
+       phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+       phys_reset(virt_to_phys(mcpm_entry_point));
+       BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+       if (!platform_ops)
+               return -EUNATCH;
+       if (platform_ops->powered_up)
+               platform_ops->powered_up();
+       return 0;
+}
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+       mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+       sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTRL.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+       dmb();
+       mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+       sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+       dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+       dmb();
+       mcpm_sync.clusters[cluster].cluster = state;
+       sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+       dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+       unsigned int i;
+       struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+       /* Warn inbound CPUs that the cluster is being torn down: */
+       c->cluster = CLUSTER_GOING_DOWN;
+       sync_cache_w(&c->cluster);
+
+       /* Back out if the inbound cluster is already in the critical region: */
+       sync_cache_r(&c->inbound);
+       if (c->inbound == INBOUND_COMING_UP)
+               goto abort;
+
+       /*
+        * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+        * teardown is complete on each CPU before tearing down the cluster.
+        *
+        * If any CPU has been woken up again from the DOWN state, then we
+        * shouldn't be taking the cluster down at all: abort in that case.
+        */
+       sync_cache_r(&c->cpus);
+       for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+               int cpustate;
+
+               if (i == cpu)
+                       continue;
+
+               while (1) {
+                       cpustate = c->cpus[i].cpu;
+                       if (cpustate != CPU_GOING_DOWN)
+                               break;
+
+                       wfe();
+                       sync_cache_r(&c->cpus[i].cpu);
+               }
+
+               switch (cpustate) {
+               case CPU_DOWN:
+                       continue;
+
+               default:
+                       goto abort;
+               }
+       }
+
+       return true;
+
+abort:
+       __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+       return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+       sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+       return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+       void (*power_up_setup)(unsigned int affinity_level))
+{
+       unsigned int i, j, mpidr, this_cluster;
+
+       BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+       BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+       /*
+        * Set initial CPU and cluster states.
+        * Only one cluster is assumed to be active at this point.
+        */
+       for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+               mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+               mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+               for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+                       mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+       }
+       mpidr = read_cpuid_mpidr();
+       this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       for_each_online_cpu(i)
+               mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+       mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+       sync_cache_w(&mcpm_sync);
+
+       if (power_up_setup) {
+               mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+               sync_cache_w(&mcpm_power_up_setup_phys);
+       }
+
+       return 0;
+}
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644 (file)
index 0000000..8178705
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
+ *
+ * Created by:  Nicolas Pitre, March 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
+ */
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+#include "vlock.h"
+
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
+       .macro  pr_dbg  string
+#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
+       b       1901f
+1902:  .asciz  "CPU"
+1903:  .asciz  " cluster"
+1904:  .asciz  ": \string"
+       .align
+1901:  adr     r0, 1902b
+       bl      printascii
+       mov     r0, r9
+       bl      printhex8
+       adr     r0, 1903b
+       bl      printascii
+       mov     r0, r10
+       bl      printhex8
+       adr     r0, 1904b
+       bl      printascii
+#endif
+       .endm
+
+       .arm
+       .align
+
+ENTRY(mcpm_entry_point)
+
+ THUMB(        adr     r12, BSYM(1f)   )
+ THUMB(        bx      r12             )
+ THUMB(        .thumb                  )
+1:
+       mrc     p15, 0, r0, c0, c0, 5           @ MPIDR
+       ubfx    r9, r0, #0, #8                  @ r9 = cpu
+       ubfx    r10, r0, #8, #8                 @ r10 = cluster
+       mov     r3, #MAX_CPUS_PER_CLUSTER
+       mla     r4, r3, r10, r9                 @ r4 = canonical CPU index
+       cmp     r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
+       blo     2f
+
+       /* We didn't expect this CPU.  Try to cheaply make it quiet. */
+1:     wfi
+       wfe
+       b       1b
+
+2:     pr_dbg  "kernel mcpm_entry_point\n"
+
+       /*
+        * MMU is off so we need to get to various variables in a
+        * position independent way.
+        */
+       adr     r5, 3f
+       ldmia   r5, {r6, r7, r8, r11}
+       add     r6, r5, r6                      @ r6 = mcpm_entry_vectors
+       ldr     r7, [r5, r7]                    @ r7 = mcpm_power_up_setup_phys
+       add     r8, r5, r8                      @ r8 = mcpm_sync
+       add     r11, r5, r11                    @ r11 = first_man_locks
+
+       mov     r0, #MCPM_SYNC_CLUSTER_SIZE
+       mla     r8, r0, r10, r8                 @ r8 = sync cluster base
+
+       @ Signal that this CPU is coming UP:
+       mov     r0, #CPU_COMING_UP
+       mov     r5, #MCPM_SYNC_CPU_SIZE
+       mla     r5, r9, r5, r8                  @ r5 = sync cpu address
+       strb    r0, [r5]
+
+       @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+       @ state, because there is at least one active CPU (this CPU).
+
+       mov     r0, #VLOCK_SIZE
+       mla     r11, r0, r10, r11               @ r11 = cluster first man lock
+       mov     r0, r11
+       mov     r1, r9                          @ cpu
+       bl      vlock_trylock                   @ implies DMB
+
+       cmp     r0, #0                          @ failed to get the lock?
+       bne     mcpm_setup_wait         @ wait for cluster setup if so
+
+       ldrb    r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+       cmp     r0, #CLUSTER_UP                 @ cluster already up?
+       bne     mcpm_setup                      @ if not, set up the cluster
+
+       @ Otherwise, release the first man lock and skip setup:
+       mov     r0, r11
+       bl      vlock_unlock
+       b       mcpm_setup_complete
+
+mcpm_setup:
+       @ Control dependency implies strb not observable before previous ldrb.
+
+       @ Signal that the cluster is being brought up:
+       mov     r0, #INBOUND_COMING_UP
+       strb    r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+       dmb
+
+       @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+       @ point onwards will observe INBOUND_COMING_UP and abort.
+
+       @ Wait for any previously-pending cluster teardown operations to abort
+       @ or complete:
+mcpm_teardown_wait:
+       ldrb    r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+       cmp     r0, #CLUSTER_GOING_DOWN
+       bne     first_man_setup
+       wfe
+       b       mcpm_teardown_wait
+
+first_man_setup:
+       dmb
+
+       @ If the outbound gave up before teardown started, skip cluster setup:
+
+       cmp     r0, #CLUSTER_UP
+       beq     mcpm_setup_leave
+
+       @ power_up_setup is now responsible for setting up the cluster:
+
+       cmp     r7, #0
+       mov     r0, #1          @ second (cluster) affinity level
+       blxne   r7              @ Call power_up_setup if defined
+       dmb
+
+       mov     r0, #CLUSTER_UP
+       strb    r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+       dmb
+
+mcpm_setup_leave:
+       @ Leave the cluster setup critical section:
+
+       mov     r0, #INBOUND_NOT_COMING_UP
+       strb    r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+       dsb
+       sev
+
+       mov     r0, r11
+       bl      vlock_unlock    @ implies DMB
+       b       mcpm_setup_complete
+
+       @ In the contended case, non-first men wait here for cluster setup
+       @ to complete:
+mcpm_setup_wait:
+       ldrb    r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+       cmp     r0, #CLUSTER_UP
+       wfene
+       bne     mcpm_setup_wait
+       dmb
+
+mcpm_setup_complete:
+       @ If a platform-specific CPU setup hook is needed, it is
+       @ called from here.
+
+       cmp     r7, #0
+       mov     r0, #0          @ first (CPU) affinity level
+       blxne   r7              @ Call power_up_setup if defined
+       dmb
+
+       @ Mark the CPU as up:
+
+       mov     r0, #CPU_UP
+       strb    r0, [r5]
+
+       @ Observability order of CPU_UP and opening of the gate does not matter.
+
+mcpm_entry_gated:
+       ldr     r5, [r6, r4, lsl #2]            @ r5 = CPU entry vector
+       cmp     r5, #0
+       wfeeq
+       beq     mcpm_entry_gated
+       dmb
+
+       pr_dbg  "released\n"
+       bx      r5
+
+       .align  2
+
+3:     .word   mcpm_entry_vectors - .
+       .word   mcpm_power_up_setup_phys - 3b
+       .word   mcpm_sync - 3b
+       .word   first_man_locks - 3b
+
+ENDPROC(mcpm_entry_point)
+
+       .bss
+
+       .align  CACHE_WRITEBACK_ORDER
+       .type   first_man_locks, #object
+first_man_locks:
+       .space  VLOCK_SIZE * MAX_NR_CLUSTERS
+       .align  CACHE_WRITEBACK_ORDER
+
+       .type   mcpm_entry_vectors, #object
+ENTRY(mcpm_entry_vectors)
+       .space  4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+       .type   mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+       .space  4               @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644 (file)
index 0000000..52b88d8
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * linux/arch/arm/common/mcpm_platsmp.c
+ *
+ * Created by:  Nicolas Pitre, November 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Code to handle secondary CPU bringup and hotplug for the cluster power API.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+
+static void __init simple_smp_init_cpus(void)
+{
+}
+
+static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+       unsigned int mpidr, pcpu, pcluster, ret;
+       extern void secondary_startup(void);
+
+       mpidr = cpu_logical_map(cpu);
+       pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
+                __func__, cpu, pcpu, pcluster);
+
+       mcpm_set_entry_vector(pcpu, pcluster, NULL);
+       ret = mcpm_cpu_power_up(pcpu, pcluster);
+       if (ret)
+               return ret;
+       mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
+       arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+       dsb_sev();
+       return 0;
+}
+
+static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+{
+       mcpm_cpu_powered_up();
+       gic_secondary_init(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int mcpm_cpu_disable(unsigned int cpu)
+{
+       /*
+        * We assume all CPUs may be shut down.
+        * This would be the hook to use for eventual Secure
+        * OS migration requests as described in the PSCI spec.
+        */
+       return 0;
+}
+
+static void mcpm_cpu_die(unsigned int cpu)
+{
+       unsigned int mpidr, pcpu, pcluster;
+       mpidr = read_cpuid_mpidr();
+       pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       mcpm_set_entry_vector(pcpu, pcluster, NULL);
+       mcpm_cpu_power_down();
+}
+
+#endif
+
+static struct smp_operations __initdata mcpm_smp_ops = {
+       .smp_init_cpus          = simple_smp_init_cpus,
+       .smp_boot_secondary     = mcpm_boot_secondary,
+       .smp_secondary_init     = mcpm_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+       .cpu_disable            = mcpm_cpu_disable,
+       .cpu_die                = mcpm_cpu_die,
+#endif
+};
+
+void __init mcpm_smp_set_ops(void)
+{
+       smp_set_ops(&mcpm_smp_ops);
+}
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644 (file)
index 0000000..ff19858
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code if voting flags  can fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
+
+@ voting lock for first-man coordination
+
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+       mov     \rscratch, #1
+       strb    \rscratch, [\rbase, \rcpu]
+       dmb
+.endm
+
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+       dmb
+       mov     \rscratch, #0
+       strb    \rscratch, [\rbase, \rcpu]
+       dsb
+       sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+       add     r1, r1, #VLOCK_VOTING_OFFSET
+
+       voting_begin    r0, r1, r2
+
+       ldrb    r2, [r0, #VLOCK_OWNER_OFFSET]   @ check whether lock is held
+       cmp     r2, #VLOCK_OWNER_NONE
+       bne     trylock_fail                    @ fail if so
+
+       @ Control dependency implies strb not observable before previous ldrb.
+
+       strb    r1, [r0, #VLOCK_OWNER_OFFSET]   @ submit my vote
+
+       voting_end      r0, r1, r2              @ implies DMB
+
+       @ Wait for the current round of voting to finish:
+
+ MANY( mov     r3, #VLOCK_VOTING_OFFSET                        )
+0:
+ MANY( ldr     r2, [r0, r3]                                    )
+ FEW(  ldr     r2, [r0, #VLOCK_VOTING_OFFSET]                  )
+       cmp     r2, #0
+       wfene
+       bne     0b
+ MANY( add     r3, r3, #4                                      )
+ MANY( cmp     r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE    )
+ MANY( bne     0b                                              )
+
+       @ Check who won:
+
+       dmb
+       ldrb    r2, [r0, #VLOCK_OWNER_OFFSET]
+       eor     r0, r1, r2                      @ zero if I won, else nonzero
+       bx      lr
+
+trylock_fail:
+       voting_end      r0, r1, r2
+       mov     r0, #1                          @ nonzero indicates that I lost
+       bx      lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+       dmb
+       mov     r1, #VLOCK_OWNER_NONE
+       strb    r1, [r0, #VLOCK_OWNER_OFFSET]
+       dsb
+       sev
+       bx      lr
+ENDPROC(vlock_unlock)
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644 (file)
index 0000000..3b44147
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET     0
+#define VLOCK_VOTING_OFFSET    4
+#define VLOCK_VOTING_SIZE      ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE             (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE       0
+
+#endif /* ! __VLOCK_H */
index c79f61faa3a55e81ae773b6d9d83532821b24c33..da1c77d39327963ab10e633aeb8809aac7da2dec 100644 (file)
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+       u64 result;
+
+       __asm__ __volatile__("@ atomic64_read\n"
+"      ldrd    %0, %H0, [%1]"
+       : "=&r" (result)
+       : "r" (&v->counter), "Qo" (v->counter)
+       );
+
+       return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+       __asm__ __volatile__("@ atomic64_set\n"
+"      strd    %2, %H2, [%1]"
+       : "=Qo" (v->counter)
+       : "r" (&v->counter), "r" (i)
+       );
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
index e1489c54cd12b4dcb8b997e8e327e2875b183b0c..bff71388e72a163c4c18c93fc745f72db9b29e00 100644 (file)
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
                flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path.  It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+       char *_p = (char *)p;
+
+       __cpuc_clean_dcache_area(_p, size);
+       outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU.  We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+       char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+       if (outer_cache.flush_range) {
+               /*
+                * Ensure dirty data migrated from other CPUs into our cache
+                * are cleaned out safely before the outer cache is cleaned:
+                */
+               __cpuc_clean_dcache_area(_p, size);
+
+               /* Clean and invalidate stale data for *p from outer ... */
+               outer_flush_range(__pa(_p), __pa(_p + size));
+       }
+#endif
+
+       /* ... and inner cache: */
+       __cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
index 5ef4d8015a6043432bbc86f8905e0d635deb1f23..1f3262e99d81e6e7be2bf3864b6649d32a39b0da 100644 (file)
@@ -42,6 +42,8 @@
 #define vectors_high() (0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;  /* defined in entry-armv.S */
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
        isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment        UL(0)
+#define cr_alignment   UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
index ad41ec2471e87e7ac5cdf71839b764532e116d06..7652712d1d149ea07a8e5052746391014124ffdf 100644 (file)
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
 
+#define ARM_CPU_IMP_ARM                        0x41
+#define ARM_CPU_IMP_INTEL              0x69
+
+#define ARM_CPU_PART_ARM1136           0xB360
+#define ARM_CPU_PART_ARM1156           0xB560
+#define ARM_CPU_PART_ARM1176           0xB760
+#define ARM_CPU_PART_ARM11MPCORE       0xB020
+#define ARM_CPU_PART_CORTEX_A8         0xC080
+#define ARM_CPU_PART_CORTEX_A9         0xC090
+#define ARM_CPU_PART_CORTEX_A5         0xC050
+#define ARM_CPU_PART_CORTEX_A15                0xC0F0
+#define ARM_CPU_PART_CORTEX_A7         0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK       0xe000
+#define ARM_CPU_XSCALE_ARCH_V1         0x2000
+#define ARM_CPU_XSCALE_ARCH_V2         0x4000
+#define ARM_CPU_XSCALE_ARCH_V3         0x6000
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
                    : "cc");                                            \
                __val;                                                  \
        })
+
 #define read_cpuid_ext(ext_reg)                                                \
        ({                                                              \
                unsigned int __val;                                     \
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
                    : "cc");                                            \
                __val;                                                  \
        })
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
 
-#define ARM_CPU_IMP_ARM                        0x41
-#define ARM_CPU_IMP_INTEL              0x69
+#else /* ifdef CONFIG_CPU_CP15 */
 
-#define ARM_CPU_PART_ARM1136           0xB360
-#define ARM_CPU_PART_ARM1156           0xB560
-#define ARM_CPU_PART_ARM1176           0xB760
-#define ARM_CPU_PART_ARM11MPCORE       0xB020
-#define ARM_CPU_PART_CORTEX_A8         0xC080
-#define ARM_CPU_PART_CORTEX_A9         0xC090
-#define ARM_CPU_PART_CORTEX_A5         0xC050
-#define ARM_CPU_PART_CORTEX_A15                0xC0F0
-#define ARM_CPU_PART_CORTEX_A7         0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg)                                                        \
+       ({                                                              \
+               WARN_ON_ONCE(1);                                        \
+               0;                                                      \
+       })
 
-#define ARM_CPU_XSCALE_ARCH_MASK       0xe000
-#define ARM_CPU_XSCALE_ARCH_V1         0x2000
-#define ARM_CPU_XSCALE_ARCH_V2         0x4000
-#define ARM_CPU_XSCALE_ARCH_V3         0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
 
+#ifdef CONFIG_CPU_CP15
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
        return read_cpuid(CPUID_ID);
 }
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+       return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
        return (read_cpuid_id() & 0xFF000000) >> 24;
index cca9f15704ed82bb2726ce8898611be42d28b5ff..ea289e1435e742807f48550200582de5630bf817 100644 (file)
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
index 8cacbcda76da986d6cf0c95d6b604eb50c574b0c..b6e9f2c108b5d726659938961684dc3374dfe5f0 100644 (file)
  *     ================
  *
  *     We have the following to choose from:
- *       arm6          - ARM6 style
  *       arm7          - ARM7 style
  *       v4_early      - ARMv4 without Thumb early abort handler
  *       v4t_late      - ARMv4 with Thumb late abort handler
  *       v4t_early     - ARMv4 with Thumb early abort handler
- *       v5tej_early   - ARMv5 with Thumb and Java early abort handler
+ *       v5t_early     - ARMv5 with Thumb early abort handler
+ *       v5tj_early    - ARMv5 with Thumb and Java early abort handler
  *       xscale        - ARMv5 with Thumb with Xscale extensions
  *       v6_early      - ARMv6 generic early abort handler
  *       v7_early      - ARMv7 generic early abort handler
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4t_late_abort
+#  define CPU_DABORT_HANDLER v4_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4_early_abort
+#  define CPU_DABORT_HANDLER v4t_late_abort
 # endif
 #endif
 
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
+#  define CPU_DABORT_HANDLER v5t_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5t_early_abort
+#  define CPU_DABORT_HANDLER v5tj_early_abort
 # endif
 #endif
 
index 02fe2fbe2477078b4fa8da59c6f2a416fdb71913..ed94b1a366ae62d9535c66847ebe85abf4f4f0c0 100644 (file)
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE    0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE         0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
                                        IOP3XX_PERIPHERAL_SIZE - 1)
index 7c3d813e15dffe76720472252995c27a22950cd6..124623e5ef14ab2bde6b6ab954901fa4d4b8c3a7 100644 (file)
 
 #define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW         (1U << 7)
+#define HSR_DABT_CM            (1U << 8)
+#define HSR_DABT_EA            (1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
index e4956f4e23e11f94e1c9cc3526ab1e7034b80fc8..18d50322a9e2fd147c361717097dba4d1a2ba35d 100644 (file)
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
index fd611996bfb5c15f53ae4b11107fc7af32087432..82b4babead2c8b7bff97a2c9b0e3f3fc37ba63f7 100644 (file)
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
 
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-       return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+       return &vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-       return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+       return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
        return reg == 15;
 }
 
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+       return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+       return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+       switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+       case 0:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       default:
+               kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+               return -EFAULT;
+       }
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
index d1736a53b12d423560e402adc373d79e834f782f..0c4e643d939ea12be15ad3a63254e25ff6c4be76 100644 (file)
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
        void *objects[KVM_NR_MEM_OBJS];
 };
 
+struct kvm_vcpu_fault_info {
+       u32 hsr;                /* Hyp Syndrome Register */
+       u32 hxfar;              /* Hyp Data/Inst. Fault Address Register */
+       u32 hpfar;              /* Hyp IPA Fault Address Register */
+       u32 hyp_pc;             /* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
        struct kvm_regs regs;
 
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
        u32 midr;
 
        /* Exception Information */
-       u32 hsr;                /* Hyp Syndrome Register */
-       u32 hxfar;              /* Hyp Data/Inst Fault Address Register */
-       u32 hpfar;              /* Hyp IPA Fault Address Register */
+       struct kvm_vcpu_fault_info fault;
 
        /* Floating point registers (VFP and Advanced SIMD/NEON) */
-       struct vfp_hard_struct vfp_guest;
-       struct vfp_hard_struct *vfp_host;
+       kvm_kernel_vfp_t vfp_guest;
+       kvm_kernel_vfp_t *vfp_host;
 
        /* VGIC state */
        struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
        /* Interrupt related fields */
        u32 irq_lines;          /* IRQ and FIQ levels */
 
-       /* Hyp exception information */
-       u32 hyp_pc;             /* PC when exception was taken from Hyp mode */
-
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+               int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+                                      unsigned long hyp_stack_ptr,
+                                      unsigned long vector_ptr)
+{
+       unsigned long pgd_low, pgd_high;
+
+       pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+       pgd_high = (pgd_ptr >> 32ULL);
+
+       /*
+        * Call initialization code, and switch to the full blown
+        * HYP code. The init code doesn't need to preserve these registers as
+        * r1-r3 and r12 are already callee save according to the AAPCS.
+        * Note that we slightly misuse the prototype by casting the pgd_low to
+        * a void *.
+        */
+       kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
index 421a20b34874384dcba6190d633adaef2afa8f4d..970f3b5fa109492ed2c46de8c9239bdfb1e5872c 100644 (file)
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK   (~0UL)
+#define HYP_PAGE_OFFSET                PAGE_OFFSET
+#define KERN_TO_HYP(kva)       (kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+       pte_val(*pte) = new_pte;
+       /*
+        * flush_pmd_entry just takes a void pointer and cleans the necessary
+        * cache entries, so we can reuse the function for ptes.
+        */
+       flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
        unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
                return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+       clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+       clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+       clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+       pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+       /*
+        * If we are going to insert an instruction page and the icache is
+        * either VIPT or PIPT, there is a potential problem where the host
+        * (or another VM) may have used the same page as this guest, and we
+        * read incorrect data from the icache.  If we're using a PIPT cache,
+        * we can invalidate just that page, but if we are using a VIPT cache
+        * we need to invalidate the entire icache - damn shame - as written
+        * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+        *
+        * VIVT caches are tagged using both the ASID and the VMID and doesn't
+        * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+        */
+       if (icache_is_pipt()) {
+               unsigned long hva = gfn_to_hva(kvm, gfn);
+               __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+       } else if (!icache_is_vivt_asid_tagged()) {
+               /* any kind of VIPT cache */
+               __flush_icache_all();
+       }
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
index ab97207d9cd3b76aa4aabd774dd399da88892997..343744e4809c17f6c93269c1070edfd43c7575ee 100644 (file)
@@ -21,7 +21,6 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
index 5cf2e979b4be442f53e7e28369def61a1911c774..7d2c3c8438014a40158e62347915a097d7174cce 100644 (file)
@@ -30,6 +30,11 @@ struct hw_pci {
        void            (*postinit)(void);
        u8              (*swizzle)(struct pci_dev *dev, u8 *pin);
        int             (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+       resource_size_t (*align_resource)(struct pci_dev *dev,
+                                         const struct resource *res,
+                                         resource_size_t start,
+                                         resource_size_t size,
+                                         resource_size_t align);
 };
 
 /*
@@ -51,6 +56,12 @@ struct pci_sys_data {
        u8              (*swizzle)(struct pci_dev *, u8 *);
                                        /* IRQ mapping                          */
        int             (*map_irq)(const struct pci_dev *, u8, u8);
+                                       /* Resource alignment requirements      */
+       resource_size_t (*align_resource)(struct pci_dev *dev,
+                                         const struct resource *res,
+                                         resource_size_t start,
+                                         resource_size_t size,
+                                         resource_size_t align);
        void            *private_data;  /* platform controller private data     */
 };
 
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644 (file)
index 0000000..0f7b762
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler.  When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER   4
+#define MAX_NR_CLUSTERS                2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated.  A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset.  If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context.  However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *                     to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended.  The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work.  This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+       int (*power_up)(unsigned int cpu, unsigned int cluster);
+       void (*power_down)(void);
+       void (*suspend)(u64);
+       void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+       /* individual CPU states */
+       struct {
+               s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+       } cpus[MAX_CPUS_PER_CLUSTER];
+
+       /* cluster state */
+       s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+       /* inbound-side state */
+       s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+       struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys;        /* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+       void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/* 
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files.  Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN               0x11
+#define CPU_COMING_UP          0x12
+#define CPU_UP                 0x13
+#define CPU_GOING_DOWN         0x14
+
+#define CLUSTER_DOWN           0x21
+#define CLUSTER_UP             0x22
+#define CLUSTER_GOING_DOWN     0x23
+
+#define INBOUND_NOT_COMING_UP  0x31
+#define INBOUND_COMING_UP      0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE     __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+       (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+       (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+       (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
index 6ef8afd1b64cbc3c27ea025d81d0e67d43f0400a..86b8fe398b9514d89a9032658f6bb3ad221b069e 100644 (file)
 #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK   (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY                 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR           (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR           (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
index 80d6fc4dbe4aac34b2111c94e4adc473166f6c68..9bcd262a900842dfc685991511d4850fb3601ff5 100644 (file)
@@ -60,6 +60,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  */
 #define FIRST_USER_ADDRESS     PAGE_SIZE
 
+/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING  TASK_SIZE
+#endif
+
 /*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
index cddda1f41f0f5fcfefe6d438937f5afcff2e9cf8..1995d1a840609f3df26114e2ca3eed388a0d5f8e 100644 (file)
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT      9
 #define TIF_SYSCALL_TRACEPOINT 10
 #define TIF_SECCOMP            11      /* seccomp syscall filtering active */
+#define TIF_NOHZ               12      /* in adaptive nohz mode */
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
index 9e9c041358ca8789e4a6798aaf871007795e8fb9..a3625d141c1d417389e14a176d175ce359ebf081 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE    (1 << 0)
 #define TLB_V4_U_PAGE  (1 << 1)
 #define TLB_V4_D_PAGE  (1 << 2)
 #define TLB_V4_I_PAGE  (1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE  (1 << 5)
 #define TLB_V6_I_PAGE  (1 << 6)
 
-#define TLB_V3_FULL    (1 << 8)
 #define TLB_V4_U_FULL  (1 << 9)
 #define TLB_V4_D_FULL  (1 << 10)
 #define TLB_V4_I_FULL  (1 << 11)
@@ -52,7 +50,6 @@
  *     =============
  *
  *     We have the following to choose from:
- *       v3    - ARMv3
  *       v4    - ARMv4 without write buffer
  *       v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *       v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 # define v6wbi_always_flags    (-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+#define v7wbi_tlb_flags_smp    (TLB_WB | TLB_BARRIER | \
                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
                                 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up     (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+       if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
                if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-                       tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+       if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-               tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644 (file)
index 0000000..0e2949b
--- /dev/null
@@ -0,0 +1,7 @@
+#ifdef CONFIG_DEBUG_UNCOMPRESS
+extern void putc(int c);
+#else
+static inline void putc(int c) {}
+#endif
+static inline void flush(void) {}
+static inline void arch_decomp_setup(void) {}
index 023bfeb367bf0066af20ddd4b82b684c1b8df80f..c1ee007523d78dd25b1dd21661af605da4aa7ef3 100644 (file)
 #define KVM_ARM_FIQ_spsr       fiq_regs[7]
 
 struct kvm_regs {
-       struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
-       __u32 svc_regs[3];      /* SP_svc, LR_svc, SPSR_svc */
-       __u32 abt_regs[3];      /* SP_abt, LR_abt, SPSR_abt */
-       __u32 und_regs[3];      /* SP_und, LR_und, SPSR_und */
-       __u32 irq_regs[3];      /* SP_irq, LR_irq, SPSR_irq */
-       __u32 fiq_regs[8];      /* R8_fiq - R14_fiq, SPSR_fiq */
+       struct pt_regs usr_regs;        /* R0_usr - R14_usr, PC, CPSR */
+       unsigned long svc_regs[3];      /* SP_svc, LR_svc, SPSR_svc */
+       unsigned long abt_regs[3];      /* SP_abt, LR_abt, SPSR_abt */
+       unsigned long und_regs[3];      /* SP_und, LR_und, SPSR_und */
+       unsigned long irq_regs[3];      /* SP_irq, LR_irq, SPSR_irq */
+       unsigned long fiq_regs[8];      /* R8_fiq - R14_fiq, SPSR_fiq */
 };
 
 /* Supported Processor Types */
index 923eec7105cff3cbb5f1eccd7e044a25e4cfdbf3..a53efa9936906128661b4abf189c4ffe3994c037 100644 (file)
@@ -149,6 +149,10 @@ int main(void)
   DEFINE(DMA_BIDIRECTIONAL,    DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,                DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,      DMA_FROM_DEVICE);
+  BLANK();
+  DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
+  DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
+  BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_KVM,             offsetof(struct kvm_vcpu, kvm));
   DEFINE(VCPU_MIDR,            offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
   DEFINE(VCPU_PC,              offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,            offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR,             offsetof(struct kvm_vcpu, arch.hsr));
-  DEFINE(VCPU_HxFAR,           offsetof(struct kvm_vcpu, arch.hxfar));
-  DEFINE(VCPU_HPFAR,           offsetof(struct kvm_vcpu, arch.hpfar));
-  DEFINE(VCPU_HYP_PC,          offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(VCPU_HSR,             offsetof(struct kvm_vcpu, arch.fault.hsr));
+  DEFINE(VCPU_HxFAR,           offsetof(struct kvm_vcpu, arch.fault.hxfar));
+  DEFINE(VCPU_HPFAR,           offsetof(struct kvm_vcpu, arch.fault.hpfar));
+  DEFINE(VCPU_HYP_PC,          offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,                offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_CPU_HCR,         offsetof(struct vgic_cpu, vgic_hcr));
index a1f73b502ef0e787e4b47b5e8cf583f6a1c4ed9d..b2ed73c454892ba8c7a7c06d66ef93881b6c2126 100644 (file)
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
                sys->busnr   = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
+               sys->align_resource = hw->align_resource;
                INIT_LIST_HEAD(&sys->resources);
 
                if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                resource_size_t size, resource_size_t align)
 {
+       struct pci_dev *dev = data;
+       struct pci_sys_data *sys = dev->sysdata;
        resource_size_t start = res->start;
 
        if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 
        start = (start + align - 1) & ~(align - 1);
 
+       if (sys->align_resource)
+               return sys->align_resource(dev, res, start, size, align);
+
        return start;
 }
 
index 0f82098c9bfe3618115dcc4e8b2d74d95c7ddcf6..c66ca7e4ee9176978ffde9076cdca54809cae7cf 100644 (file)
@@ -192,18 +192,6 @@ __dabt_svc:
        svc_entry
        mov     r2, sp
        dabt_helper
-
-       @
-       @ IRQs off again before pulling preserved data off the stack
-       @
-       disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       tst     r5, #PSR_I_BIT
-       bleq    trace_hardirqs_on
-       tst     r5, #PSR_I_BIT
-       blne    trace_hardirqs_off
-#endif
        svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
        blne    svc_preempt
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       @ The parent context IRQs must have been enabled to get here in
-       @ the first place, so there's no point checking the PSR I bit.
-       bl      trace_hardirqs_on
-#endif
-       svc_exit r5                             @ return from exception
+       svc_exit r5, irq = 1                    @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__irq_svc)
 
@@ -295,22 +278,8 @@ __und_svc_fault:
        mov     r0, sp                          @ struct pt_regs *regs
        bl      __und_fault
 
-       @
-       @ IRQs off again before pulling preserved data off the stack
-       @
 __und_svc_finish:
-       disable_irq_notrace
-
-       @
-       @ restore SPSR and restart the instruction
-       @
        ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
-#ifdef CONFIG_TRACE_IRQFLAGS
-       tst     r5, #PSR_I_BIT
-       bleq    trace_hardirqs_on
-       tst     r5, #PSR_I_BIT
-       blne    trace_hardirqs_off
-#endif
        svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
        svc_entry
        mov     r2, sp                          @ regs
        pabt_helper
-
-       @
-       @ IRQs off again before pulling preserved data off the stack
-       @
-       disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       tst     r5, #PSR_I_BIT
-       bleq    trace_hardirqs_on
-       tst     r5, #PSR_I_BIT
-       blne    trace_hardirqs_off
-#endif
        svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
 #ifdef CONFIG_IRQSOFF_TRACER
        bl      trace_hardirqs_off
 #endif
+       ct_user_exit save = 0
        .endm
 
        .macro  kuser_cmpxchg_check
index fefd7f971437a084bedb5d4281fd6e78bac59d04..bc5bc0a971319674c552b578367a19a5a464d798 100644 (file)
@@ -35,12 +35,11 @@ ret_fast_syscall:
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
-#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
-#endif
 
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
+       ct_user_enter
 
        restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend         )
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
 no_work_pending:
-#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
-#endif
+
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
+       ct_user_enter save = 0
 
        restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
@@ -406,6 +405,7 @@ ENTRY(vector_swi)
        mcr     p15, 0, ip, c1, c0              @ update control register
 #endif
        enable_irq
+       ct_user_exit
 
        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer
index 9a8531eadd3da437ccadd6f5b60eac6a16808607..160f3376ba6da8f1a697ef62cb9180795a0576e6 100644 (file)
        .endm
 
 #ifndef CONFIG_THUMB2_KERNEL
-       .macro  svc_exit, rpsr
+       .macro  svc_exit, rpsr, irq = 0
+       .if     \irq != 0
+       @ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+       @ The parent context IRQs must have been enabled to get here in
+       @ the first place, so there's no point checking the PSR I bit.
+       bl      trace_hardirqs_on
+#endif
+       .else
+       @ IRQs off again before pulling preserved data off the stack
+       disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+       tst     \rpsr, #PSR_I_BIT
+       bleq    trace_hardirqs_on
+       tst     \rpsr, #PSR_I_BIT
+       blne    trace_hardirqs_off
+#endif
+       .endif
        msr     spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6)
        ldr     r0, [sp]
        mov     pc, \reg
        .endm
 #else  /* CONFIG_THUMB2_KERNEL */
-       .macro  svc_exit, rpsr
+       .macro  svc_exit, rpsr, irq = 0
+       .if     \irq != 0
+       @ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+       @ The parent context IRQs must have been enabled to get here in
+       @ the first place, so there's no point checking the PSR I bit.
+       bl      trace_hardirqs_on
+#endif
+       .else
+       @ IRQs off again before pulling preserved data off the stack
+       disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+       tst     \rpsr, #PSR_I_BIT
+       bleq    trace_hardirqs_on
+       tst     \rpsr, #PSR_I_BIT
+       blne    trace_hardirqs_off
+#endif
+       .endif
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
        clrex                                   @ clear the exclusive monitor
        .endm
 #endif /* !CONFIG_THUMB2_KERNEL */
 
+/*
+ * Context tracking subsystem.  Used to instrument transitions
+ * between user and kernel mode.
+ */
+       .macro ct_user_exit, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+       .if     \save
+       stmdb   sp!, {r0-r3, ip, lr}
+       bl      user_exit
+       ldmia   sp!, {r0-r3, ip, lr}
+       .else
+       bl      user_exit
+       .endif
+#endif
+       .endm
+
+       .macro ct_user_enter, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+       .if     \save
+       stmdb   sp!, {r0-r3, ip, lr}
+       bl      user_enter
+       ldmia   sp!, {r0-r3, ip, lr}
+       .else
+       bl      user_enter
+       .endif
+#endif
+       .endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
index 854bd22380d335dba0e6761c317ccf6f45d3b447..5b391a689b47b7c75864937991d03cf5dbc9904a 100644 (file)
@@ -98,8 +98,9 @@ __mmap_switched:
        str     r9, [r4]                        @ Save processor ID
        str     r1, [r5]                        @ Save machine type
        str     r2, [r6]                        @ Save atags pointer
-       bic     r4, r0, #CR_A                   @ Clear 'A' bit
-       stmia   r7, {r0, r4}                    @ Save control register values
+       cmp     r7, #0
+       bicne   r4, r0, #CR_A                   @ Clear 'A' bit
+       stmneia r7, {r0, r4}                    @ Save control register values
        b       start_kernel
 ENDPROC(__mmap_switched)
 
@@ -113,7 +114,11 @@ __mmap_switched_data:
        .long   processor_id                    @ r4
        .long   __machine_arch_type             @ r5
        .long   __atags_pointer                 @ r6
+#ifdef CONFIG_CPU_CP15
        .long   cr_alignment                    @ r7
+#else
+       .long   0                               @ r7
+#endif
        .long   init_thread_union + THREAD_START_SP @ sp
        .size   __mmap_switched_data, . - __mmap_switched_data
 
index 2c228a07e58c7f023a5df0828e6ceacc52793be9..6a2e09c952c715862cb32907af2438e39d5b9788 100644 (file)
  * numbers for r1.
  *
  */
-       .arm
 
        __HEAD
+
+#ifdef CONFIG_CPU_THUMBONLY
+       .thumb
+ENTRY(stext)
+#else
+       .arm
 ENTRY(stext)
 
  THUMB(        adr     r9, BSYM(1f)    )       @ Kernel is always entered in ARM.
  THUMB(        bx      r9              )       @ If this is a Thumb-2 kernel,
  THUMB(        .thumb                  )       @ switch to Thumb now.
  THUMB(1:                      )
+#endif
 
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
                                                @ and irqs disabled
index 5dc1aa6f0f7d75e9339094a1da1b61eb018f32f3..1fd749ee4a1bb96c1740f4a131fde1d3cd53bfb0 100644 (file)
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
        .notifier_call = dbg_cpu_pm_notify,
 };
 
index 146157dfe27c6c991b8507c2c3f248d810754cc0..8c3094d0f7b78426e367e2cf3ed538e54d686cbd 100644 (file)
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, event) >= 0;
index 8085417555dd7c49176cd06d88354dd1b2faca67..fafedd86885ddeb22c232a373239dd17ba620d3a 100644 (file)
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
        struct return_address_data *data = d;
 
        if (!data->level) {
-               data->addr = (void *)frame->lr;
+               data->addr = (void *)frame->pc;
 
                return 1;
        } else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
        struct stackframe frame;
        register unsigned long current_sp asm ("sp");
 
-       data.level = level + 1;
+       data.level = level + 2;
+       data.addr = NULL;
 
        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = current_sp;
index bd6f56b9ec2101534b7477c11c9f6ef0a3a6e68b..59d2adb764a995f9174a6494bd2bcc1974c10e73 100644 (file)
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
        return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
        u64 epoch_ns;
        u32 epoch_cyc;
index d343a6c3a6d1f26ec10ca55f914785b65b4fa473..728007c4a2b7eaf18e038d93a8075699d7a479dc 100644 (file)
@@ -56,7 +56,6 @@
 #include <asm/virt.h>
 
 #include "atags.h"
-#include "tcm.h"
 
 
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -291,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)
 
 static void __init cacheid_init(void)
 {
-       unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();
 
        if (arch >= CPU_ARCH_ARMv6) {
+               unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
@@ -390,7 +389,7 @@ static void __init feat_v6_fixup(void)
  *
  * cpu_init sets up the per-CPU stacks.
  */
-void cpu_init(void)
+void notrace cpu_init(void)
 {
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];
@@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_crashkernel();
 
-       tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index 45eac87ed66a692859e7431af684f0b33b3ffeed..5bc1a63284e3913a19afc53c71aefc1847e4c667 100644 (file)
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)
 
 #ifdef CONFIG_ARM_ERRATA_764369
        /* Cortex-A9 only */
-       if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+       if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
                scu_ctrl = __raw_readl(scu_base + 0x30);
                if (!(scu_ctrl & 1))
                        __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
index e82e1d24877227ba65ab716dc6c87e110617bd79..9a52a07aa40ee3c017c55b5b5cce3b92ae7c599c 100644 (file)
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void)
                return;
 
        dummy_flush_tlb_a15_erratum();
-       smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
-                              NULL, 1);
+       smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-       int cpu;
+       int cpu, this_cpu;
        cpumask_t mask = { CPU_BITS_NONE };
 
        if (!erratum_a15_798181())
                return;
 
        dummy_flush_tlb_a15_erratum();
+       this_cpu = get_cpu();
        for_each_online_cpu(cpu) {
-               if (cpu == smp_processor_id())
+               if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are running
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
                        cpumask_set_cpu(cpu, &mask);
        }
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+       put_cpu();
 }
 
 void flush_tlb_all(void)
index 30ae6bb4a310c12723b7bc7ca26ea91ae25bb770..f50f19e5c138839bede270baf3531a41aa5af7c4 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h
deleted file mode 100644 (file)
index 8015ad4..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * TCM memory handling for ARM systems
- *
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Rickard Andersson <rickard.andersson@stericsson.com>
- */
-
-#ifdef CONFIG_HAVE_TCM
-void __init tcm_init(void);
-#else
-/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
-{
-}
-#endif
index fc96ce6f2357c683497f80344349449c74e3b077..8dc5e76cb789dcf9cf562e31595ddd840979c49e 100644 (file)
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
index 5a936988eb24d1e62e5ae248ff6e4a3db59667b9..a0dfc2a53f91135b8c8c7deaf853d59095ab5295 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension       virt");
 #endif
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -201,6 +198,7 @@ int kvm_dev_ioctl_check_extension(long ext)
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
+               break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
@@ -303,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-int __attribute_const__ kvm_target_cpu(void)
-{
-       unsigned long implementor = read_cpuid_implementor();
-       unsigned long part_number = read_cpuid_part_number();
-
-       if (implementor != ARM_CPU_IMP_ARM)
-               return -EINVAL;
-
-       switch (part_number) {
-       case ARM_CPU_PART_CORTEX_A15:
-               return KVM_ARM_TARGET_CORTEX_A15;
-       default:
-               return -EINVAL;
-       }
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
        int ret;
@@ -481,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
        spin_unlock(&kvm_vmid_lock);
 }
 
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       /* SVC called from Hyp mode should never get here */
-       kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-       BUG();
-       return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-                     vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-       if (kvm_psci_call(vcpu))
-               return 1;
-
-       kvm_inject_undefined(vcpu);
-       return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       if (kvm_psci_call(vcpu))
-               return 1;
-
-       kvm_inject_undefined(vcpu);
-       return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       /* The hypervisor should never cause aborts */
-       kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-               vcpu->arch.hxfar, vcpu->arch.hsr);
-       return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       /* This is either an error in the ws. code or an external abort */
-       kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-               vcpu->arch.hxfar, vcpu->arch.hsr);
-       return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-       [HSR_EC_WFI]            = kvm_handle_wfi,
-       [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
-       [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
-       [HSR_EC_CP14_MR]        = kvm_handle_cp14_access,
-       [HSR_EC_CP14_LS]        = kvm_handle_cp14_load_store,
-       [HSR_EC_CP14_64]        = kvm_handle_cp14_access,
-       [HSR_EC_CP_0_13]        = kvm_handle_cp_0_13_access,
-       [HSR_EC_CP10_ID]        = kvm_handle_cp10_id,
-       [HSR_EC_SVC_HYP]        = handle_svc_hyp,
-       [HSR_EC_HVC]            = handle_hvc,
-       [HSR_EC_SMC]            = handle_smc,
-       [HSR_EC_IABT]           = kvm_handle_guest_abort,
-       [HSR_EC_IABT_HYP]       = handle_pabt_hyp,
-       [HSR_EC_DABT]           = kvm_handle_guest_abort,
-       [HSR_EC_DABT_HYP]       = handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed.  So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-       unsigned long cpsr, cond, insn;
-
-       /*
-        * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-        * catch undefined instructions, and then we won't get past
-        * the arm_exit_handlers test anyway.
-        */
-       BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-       /* Top two bits non-zero?  Unconditional. */
-       if (vcpu->arch.hsr >> 30)
-               return true;
-
-       cpsr = *vcpu_cpsr(vcpu);
-
-       /* Is condition field valid? */
-       if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-               cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-       else {
-               /* This can happen in Thumb mode: examine IT state. */
-               unsigned long it;
-
-               it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-               /* it == 0 => unconditional. */
-               if (it == 0)
-                       return true;
-
-               /* The cond for this insn works out as the top 4 bits. */
-               cond = (it >> 4);
-       }
-
-       /* Shift makes it look like an ARM-mode instruction */
-       insn = cond << 28;
-       return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                      int exception_index)
-{
-       unsigned long hsr_ec;
-
-       switch (exception_index) {
-       case ARM_EXCEPTION_IRQ:
-               return 1;
-       case ARM_EXCEPTION_UNDEFINED:
-               kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-                       vcpu->arch.hyp_pc);
-               BUG();
-               panic("KVM: Hypervisor undefined exception!\n");
-       case ARM_EXCEPTION_DATA_ABORT:
-       case ARM_EXCEPTION_PREF_ABORT:
-       case ARM_EXCEPTION_HVC:
-               hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-               if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-                   || !arm_exit_handlers[hsr_ec]) {
-                       kvm_err("Unkown exception class: %#08lx, "
-                               "hsr: %#08x\n", hsr_ec,
-                               (unsigned int)vcpu->arch.hsr);
-                       BUG();
-               }
-
-               /*
-                * See ARM ARM B1.14.1: "Hyp traps on instructions
-                * that fail their condition code check"
-                */
-               if (!kvm_condition_valid(vcpu)) {
-                       bool is_wide = vcpu->arch.hsr & HSR_IL;
-                       kvm_skip_instr(vcpu, is_wide);
-                       return 1;
-               }
-
-               return arm_exit_handlers[hsr_ec](vcpu, run);
-       default:
-               kvm_pr_unimpl("Unsupported exception type: %d",
-                             exception_index);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               return 0;
-       }
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
        if (likely(vcpu->arch.has_run_once))
@@ -972,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
        unsigned long long pgd_ptr;
-       unsigned long pgd_low, pgd_high;
        unsigned long hyp_stack_ptr;
        unsigned long stack_page;
        unsigned long vector_ptr;
@@ -981,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
        __hyp_set_vectors((unsigned long)vector);
 
        pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-       pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-       pgd_high = (pgd_ptr >> 32ULL);
        stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
        vector_ptr = (unsigned long)__kvm_hyp_vector;
 
-       /*
-        * Call initialization code, and switch to the full blown
-        * HYP code. The init code doesn't need to preserve these registers as
-        * r1-r3 and r12 are already callee save according to the AAPCS.
-        * Note that we slightly misuse the prototype by casing the pgd_low to
-        * a void *.
-        */
-       kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+       __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
 /**
@@ -1077,7 +892,7 @@ static int init_hyp_mode(void)
        /*
         * Map the host VFP structures
         */
-       kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+       kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
        if (!kvm_host_vfp_state) {
                err = -ENOMEM;
                kvm_err("Cannot allocate host VFP state\n");
@@ -1085,7 +900,7 @@ static int init_hyp_mode(void)
        }
 
        for_each_possible_cpu(cpu) {
-               struct vfp_hard_struct *vfp;
+               kvm_kernel_vfp_t *vfp;
 
                vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
                err = create_hyp_mappings(vfp, vfp + 1);
index 4ea9a982269c8f9c9c56808f92b464a025356533..8eea97be1ed52e8ea42d463f166fdc3e69261682 100644 (file)
@@ -76,14 +76,14 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
 {
-       u32 val;
+       unsigned long val;
        int cpu;
 
-       cpu = get_cpu();
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
+       cpu = get_cpu();
+
        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
 
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 
                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
-                       kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
-               kvm_err("Unsupported guest CP15 access at: %08x\n",
+               kvm_err("Unsupported guest CP15 access at: %08lx\n",
                        *vcpu_pc(vcpu));
                print_cp_instr(params);
        }
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        struct coproc_params params;
 
-       params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-       params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-       params.is_write = ((vcpu->arch.hsr & 1) == 0);
+       params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+       params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+       params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;
 
-       params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+       params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
-       params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+       params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRn = 0;
 
        return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        struct coproc_params params;
 
-       params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-       params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-       params.is_write = ((vcpu->arch.hsr & 1) == 0);
+       params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+       params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+       params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;
 
-       params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-       params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-       params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+       params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+       params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+       params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;
 
        return emulate_cp15(vcpu, &params);
index 992adfafa2ff4eb31c8c0d6e9f22aa2bea681114..b7301d3e479921f4d8983a172c88ec6edababd81 100644 (file)
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
                                      const struct coproc_params *params)
 {
-       kvm_debug("CP15 write to read-only register at: %08x\n",
+       kvm_debug("CP15 write to read-only register at: %08lx\n",
                  *vcpu_pc(vcpu));
        print_cp_instr(params);
        return false;
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
                                        const struct coproc_params *params)
 {
-       kvm_debug("CP15 read to write-only register at: %08x\n",
+       kvm_debug("CP15 read to write-only register at: %08lx\n",
                  *vcpu_pc(vcpu));
        print_cp_instr(params);
        return false;
index d61450ac6665ffa0ed5c94cd10ce9f0298434620..bdede9e7da516a43b5a3d681850727860f0534ab 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
 #include <trace/events/kvm.h>
 
 #include "trace.h"
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  * Return a pointer to the register number valid in the current mode of
  * the virtual CPU.
  */
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-       u32 *reg_array = (u32 *)&vcpu->arch.regs;
-       u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+       unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+       unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 
        switch (mode) {
        case USR_MODE...SVC_MODE:
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-       u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+       unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
        switch (mode) {
        case SVC_MODE:
                return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
        }
 }
 
-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu:      the vcpu pointer
- * @run:       the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed.  So let's re-implement the hardware, in
+ * software!
  */
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 {
-       trace_kvm_wfi(*vcpu_pc(vcpu));
-       kvm_vcpu_block(vcpu);
-       return 1;
+       unsigned long cpsr, cond, insn;
+
+       /*
+        * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+        * catch undefined instructions, and then we won't get past
+        * the arm_exit_handlers test anyway.
+        */
+       BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+       /* Top two bits non-zero?  Unconditional. */
+       if (kvm_vcpu_get_hsr(vcpu) >> 30)
+               return true;
+
+       cpsr = *vcpu_cpsr(vcpu);
+
+       /* Is condition field valid? */
+       if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+               cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+       else {
+               /* This can happen in Thumb mode: examine IT state. */
+               unsigned long it;
+
+               it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+               /* it == 0 => unconditional. */
+               if (it == 0)
+                       return true;
+
+               /* The cond for this insn works out as the top 4 bits. */
+               cond = (it >> 4);
+       }
+
+       /* Shift makes it look like an ARM-mode instruction */
+       insn = cond << 28;
+       return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
 }
 
 /**
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-       u32 new_lr_value;
-       u32 new_spsr_value;
-       u32 cpsr = *vcpu_cpsr(vcpu);
+       unsigned long new_lr_value;
+       unsigned long new_spsr_value;
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
        u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
        bool is_thumb = (cpsr & PSR_T_BIT);
        u32 vect_offset = 4;
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-       u32 new_lr_value;
-       u32 new_spsr_value;
-       u32 cpsr = *vcpu_cpsr(vcpu);
+       unsigned long new_lr_value;
+       unsigned long new_spsr_value;
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
        u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
        bool is_thumb = (cpsr & PSR_T_BIT);
        u32 vect_offset;
index 2339d9609d368867daa5335473b77f031a8f78c3..152d03612181d16d5fef5e1e84da8d2c178fbf58 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        return -EINVAL;
 }
 
+int __attribute_const__ kvm_target_cpu(void)
+{
+       unsigned long implementor = read_cpuid_implementor();
+       unsigned long part_number = read_cpuid_part_number();
+
+       if (implementor != ARM_CPU_IMP_ARM)
+               return -EINVAL;
+
+       switch (part_number) {
+       case ARM_CPU_PART_CORTEX_A15:
+               return KVM_ARM_TARGET_CORTEX_A15;
+       default:
+               return -EINVAL;
+       }
+}
+
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                        const struct kvm_vcpu_init *init)
 {
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644 (file)
index 0000000..26ad173
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* SVC called from Hyp mode should never get here */
+       kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+       BUG();
+       return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+                     kvm_vcpu_hvc_get_imm(vcpu));
+
+       if (kvm_psci_call(vcpu))
+               return 1;
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       if (kvm_psci_call(vcpu))
+               return 1;
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* The hypervisor should never cause aborts */
+       kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+               kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+       return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* This is either an error in the ws. code or an external abort */
+       kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+               kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+       return -EFAULT;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:      the vcpu pointer
+ * @run:       the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       trace_kvm_wfi(*vcpu_pc(vcpu));
+       kvm_vcpu_block(vcpu);
+       return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+       [HSR_EC_WFI]            = kvm_handle_wfi,
+       [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
+       [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
+       [HSR_EC_CP14_MR]        = kvm_handle_cp14_access,
+       [HSR_EC_CP14_LS]        = kvm_handle_cp14_load_store,
+       [HSR_EC_CP14_64]        = kvm_handle_cp14_access,
+       [HSR_EC_CP_0_13]        = kvm_handle_cp_0_13_access,
+       [HSR_EC_CP10_ID]        = kvm_handle_cp10_id,
+       [HSR_EC_SVC_HYP]        = handle_svc_hyp,
+       [HSR_EC_HVC]            = handle_hvc,
+       [HSR_EC_SMC]            = handle_smc,
+       [HSR_EC_IABT]           = kvm_handle_guest_abort,
+       [HSR_EC_IABT_HYP]       = handle_pabt_hyp,
+       [HSR_EC_DABT]           = kvm_handle_guest_abort,
+       [HSR_EC_DABT_HYP]       = handle_dabt_hyp,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+       u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+       if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+           !arm_exit_handlers[hsr_ec]) {
+               kvm_err("Unknown exception class: hsr: %#08x\n",
+                       (unsigned int)kvm_vcpu_get_hsr(vcpu));
+               BUG();
+       }
+
+       return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                      int exception_index)
+{
+       exit_handle_fn exit_handler;
+
+       switch (exception_index) {
+       case ARM_EXCEPTION_IRQ:
+               return 1;
+       case ARM_EXCEPTION_UNDEFINED:
+               kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+                       kvm_vcpu_get_hyp_pc(vcpu));
+               BUG();
+               panic("KVM: Hypervisor undefined exception!\n");
+       case ARM_EXCEPTION_DATA_ABORT:
+       case ARM_EXCEPTION_PREF_ABORT:
+       case ARM_EXCEPTION_HVC:
+               /*
+                * See ARM ARM B1.14.1: "Hyp traps on instructions
+                * that fail their condition code check"
+                */
+               if (!kvm_condition_valid(vcpu)) {
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       return 1;
+               }
+
+               exit_handler = kvm_get_exit_handler(vcpu);
+
+               return exit_handler(vcpu, run);
+       default:
+               kvm_pr_unimpl("Unsupported exception type: %d",
+                             exception_index);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               return 0;
+       }
+}
index 8ca87ab0919d8efe6a3d771f293d0a5e6b87dcd2..f7793df62f5834ab60abb19528d2e31f9a756a11 100644 (file)
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations).  If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
        push    {r2, r3}
 
        add     r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
 
        pop     {r2, r3}
        bx      lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
  * instruction is issued since all traps are disabled when running the host
  * kernel as per the Hyp-mode initialization at boot time.
  *
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
  * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
  * instructions are called from within Hyp-mode.
  *
  * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
index 98a870ff1a5c51a2d496f2df84cefe06de9a0aac..72a12f2171b26bba2937e3c288f4bd51f1b5540b 100644 (file)
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       __u32 *dest;
+       unsigned long *dest;
        unsigned int len;
        int mask;
 
        if (!run->mmio.is_write) {
                dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-               memset(dest, 0, sizeof(int));
+               *dest = 0;
 
                len = run->mmio.len;
-               if (len > 4)
+               if (len > sizeof(unsigned long))
                        return -EINVAL;
 
                memcpy(dest, run->mmio.data, len);
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                                *((u64 *)run->mmio.data));
 
-               if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+               if (vcpu->arch.mmio_decode.sign_extend &&
+                   len < sizeof(unsigned long)) {
                        mask = 1U << ((len * 8) - 1);
                        *dest = (*dest ^ mask) - mask;
                }
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        unsigned long rt, len;
        bool is_write, sign_extend;
 
-       if ((vcpu->arch.hsr >> 8) & 1) {
+       if (kvm_vcpu_dabt_isextabt(vcpu)) {
                /* cache operation on I/O addr, tell guest unsupported */
-               kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                return 1;
        }
 
-       if ((vcpu->arch.hsr >> 7) & 1) {
+       if (kvm_vcpu_dabt_iss1tw(vcpu)) {
                /* page table accesses IO mem: tell guest to fix its TTBR */
-               kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                return 1;
        }
 
-       switch ((vcpu->arch.hsr >> 22) & 0x3) {
-       case 0:
-               len = 1;
-               break;
-       case 1:
-               len = 2;
-               break;
-       case 2:
-               len = 4;
-               break;
-       default:
-               kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-               return -EFAULT;
-       }
+       len = kvm_vcpu_dabt_get_as(vcpu);
+       if (unlikely(len < 0))
+               return len;
 
-       is_write = vcpu->arch.hsr & HSR_WNR;
-       sign_extend = vcpu->arch.hsr & HSR_SSE;
-       rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+       is_write = kvm_vcpu_dabt_iswrite(vcpu);
+       sign_extend = kvm_vcpu_dabt_issext(vcpu);
+       rt = kvm_vcpu_dabt_get_rd(vcpu);
 
        if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
                /* IO memory trying to read/write pc */
-               kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                return 1;
        }
 
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * The MMIO instruction is emulated and should not be re-executed
         * in the guest.
         */
-       kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 0;
 }
 
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
         * space do its magic.
         */
 
-       if (vcpu->arch.hsr & HSR_ISV) {
+       if (kvm_vcpu_dabt_isvalid(vcpu)) {
                ret = decode_hsr(vcpu, fault_ipa, &mmio);
                if (ret)
                        return ret;
index 99e07c7dd7451fac2dfd4e3e2ea80f6f3c3f236a..2f12e4056408e93c43976bd24d6a4109ac27753b 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <trace/events/kvm.h>
-#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
@@ -28,8 +27,6 @@
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/mach/map.h>
-#include <trace/events/kvm.h>
 
 #include "trace.h"
 
@@ -37,19 +34,9 @@ extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-       kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
-       pte_val(*pte) = new_pte;
-       /*
-        * flush_pmd_entry just takes a void pointer and cleans the necessary
-        * cache entries, so we can reuse the function for ptes.
-        */
-       flush_pmd_entry(pte);
+       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
        }
 }
 
+static void free_hyp_pgd_entry(unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+       pgd = hyp_pgd + pgd_index(hyp_addr);
+       pud = pud_offset(pgd, hyp_addr);
+
+       if (pud_none(*pud))
+               return;
+       BUG_ON(pud_bad(*pud));
+
+       pmd = pmd_offset(pud, hyp_addr);
+       free_ptes(pmd, addr);
+       pmd_free(NULL, pmd);
+       pud_clear(pud);
+}
+
 /**
  * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
  *
  * Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ * either mappings in the kernel memory area (above PAGE_OFFSET), or
+ * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
  */
 void free_hyp_pmds(void)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
        unsigned long addr;
 
        mutex_lock(&kvm_hyp_pgd_mutex);
-       for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-               pgd = hyp_pgd + pgd_index(addr);
-               pud = pud_offset(pgd, addr);
-
-               if (pud_none(*pud))
-                       continue;
-               BUG_ON(pud_bad(*pud));
-
-               pmd = pmd_offset(pud, addr);
-               free_ptes(pmd, addr);
-               pmd_free(NULL, pmd);
-               pud_clear(pud);
-       }
+       for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+               free_hyp_pgd_entry(addr);
+       for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+               free_hyp_pgd_entry(addr);
        mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
        struct page *page;
 
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-               pte = pte_offset_kernel(pmd, addr);
+               unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+               pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
        unsigned long addr;
 
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-               pte = pte_offset_kernel(pmd, addr);
+               unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+               pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
        unsigned long addr, next;
 
        for (addr = start; addr < end; addr = next) {
-               pmd = pmd_offset(pud, addr);
+               unsigned long hyp_addr = KERN_TO_HYP(addr);
+               pmd = pmd_offset(pud, hyp_addr);
 
                BUG_ON(pmd_sect(*pmd));
 
                if (pmd_none(*pmd)) {
-                       pte = pte_alloc_one_kernel(NULL, addr);
+                       pte = pte_alloc_one_kernel(NULL, hyp_addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
        unsigned long addr, next;
        int err = 0;
 
-       BUG_ON(start > end);
-       if (start < PAGE_OFFSET)
+       if (start >= end)
+               return -EINVAL;
+       /* Check for a valid kernel memory mapping */
+       if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
+               return -EINVAL;
+       /* Check for a valid kernel IO mapping */
+       if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
                return -EINVAL;
 
        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
-               pgd = hyp_pgd + pgd_index(addr);
-               pud = pud_offset(pgd, addr);
+               unsigned long hyp_addr = KERN_TO_HYP(addr);
+               pgd = hyp_pgd + pgd_index(hyp_addr);
+               pud = pud_offset(pgd, hyp_addr);
 
                if (pud_none_or_clear_bad(pud)) {
-                       pmd = pmd_alloc_one(NULL, addr);
+                       pmd = pmd_alloc_one(NULL, hyp_addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
@@ -236,12 +243,13 @@ out:
 }
 
 /**
- * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:      The virtual kernel start address of the range
  * @to:                The virtual kernel end address of the range (exclusive)
  *
- * The same virtual address as the kernel virtual address is also used in
- * Hyp-mode mapping to the same underlying physical pages.
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
  *
  * Note: Wrapping around zero in the "to" address is not supported.
  */
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
 }
 
 /**
- * create_hyp_io_mappings - map a physical IO range in Hyp mode
- * @from:      The virtual HYP start address of the range
- * @to:                The virtual HYP end address of the range (exclusive)
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from:      The kernel start VA of the range
+ * @to:                The kernel end VA of the range (exclusive)
  * @addr:      The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 {
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
 
        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
-       clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+       kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;
 
        return 0;
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
-               pmd += pmd_index(addr);
                get_page(virt_to_page(pud));
-       } else
-               pmd = pmd_offset(pud, addr);
+       }
+
+       pmd = pmd_offset(pud, addr);
 
        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
-               clean_pte_table(pte);
+               kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
-               pte += pte_index(addr);
                get_page(virt_to_page(pmd));
-       } else
-               pte = pte_offset_kernel(pmd, addr);
+       }
+
+       pte = pte_offset_kernel(pmd, addr);
 
        if (iomap && pte_present(*pte))
                return -EFAULT;
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
-               kvm_tlb_flush_vmid(kvm);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));
 
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
        pfn = __phys_to_pfn(pa);
 
        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-               pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+               pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+               kvm_set_s2pte_writable(&pte);
 
                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
@@ -492,29 +504,6 @@ out:
        return ret;
 }
 
-static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
-       /*
-        * If we are going to insert an instruction page and the icache is
-        * either VIPT or PIPT, there is a potential problem where the host
-        * (or another VM) may have used the same page as this guest, and we
-        * read incorrect data from the icache.  If we're using a PIPT cache,
-        * we can invalidate just that page, but if we are using a VIPT cache
-        * we need to invalidate the entire icache - damn shame - as written
-        * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-        *
-        * VIVT caches are tagged using both the ASID and the VMID and doesn't
-        * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-        */
-       if (icache_is_pipt()) {
-               unsigned long hva = gfn_to_hva(kvm, gfn);
-               __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
-       } else if (!icache_is_vivt_asid_tagged()) {
-               /* any kind of VIPT cache */
-               __flush_icache_all();
-       }
-}
-
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 
-       write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+       write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
-               pte_val(new_pte) |= L_PTE_S2_RDWR;
+               kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -585,7 +574,6 @@ out_unlock:
  */
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       unsigned long hsr_ec;
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        gfn_t gfn;
        int ret, idx;
 
-       hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
-       is_iabt = (hsr_ec == HSR_EC_IABT);
-       fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+       is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+       fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 
-       trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
-                             vcpu->arch.hxfar, fault_ipa);
+       trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+                             kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+       fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
-               kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
-                       hsr_ec, fault_status);
+               kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
+                       kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }
 
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
-                       kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                        goto out_unlock;
                }
 
-               /* Adjust page offset */
-               fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+               /*
+                * The IPA is reported as [MAX:12], so we need to
+                * complement it with the bottom 12 bits from the
+                * faulting VA. This is always 12 bits, irrespective
+                * of the page size.
+                */
+               fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-       kvm_tlb_flush_vmid(kvm);
+       kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
                pmd = pmd_offset(pud, addr);
 
                pud_clear(pud);
-               clean_pmd_entry(pmd);
+               kvm_clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
 }
index 0e4cfe123b385339629499ae5dacb4f6f5e08ff6..17c5ac7d10ed173753dd96ae4a7b25aa3106998b 100644 (file)
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;
 
-       if (addr & ~PAGE_MASK)
+       if (addr & (SZ_4K - 1))
                return -EINVAL;
 
        mutex_lock(&kvm->lock);
index 8a68f1ec66b9ac07dd9e20cf7c6239e794972407..577298ed5a44973964d61caf94bf5e936dba20d4 100644 (file)
@@ -300,7 +300,7 @@ void __init omap3xxx_check_revision(void)
         * If the processor type is Cortex-A8 and the revision is 0x0
         * it means its Cortex r0p0 which is 3430 ES1.0.
         */
-       cpuid = read_cpuid(CPUID_ID);
+       cpuid = read_cpuid_id();
        if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
                omap_revision = OMAP3430_REV_ES1_0;
                cpu_rev = "1.0";
@@ -460,7 +460,7 @@ void __init omap4xxx_check_revision(void)
         * Use ARM register to detect the correct ES version
         */
        if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
-               idcode = read_cpuid(CPUID_ID);
+               idcode = read_cpuid_id();
                rev = (idcode & 0xf) - 1;
        }
 
index d9727218dd0a3089792a1ef25737987ce9034f5b..7f5626d8fd3edc62afab225e6e160fe34fb001c8 100644 (file)
@@ -209,7 +209,7 @@ static void __init omap4_smp_init_cpus(void)
        unsigned int i = 0, ncores = 1, cpu_id;
 
        /* Use ARM cpuid check here, as SoC detection will not work so early */
-       cpu_id = read_cpuid(CPUID_ID) & CPU_MASK;
+       cpu_id = read_cpuid_id() & CPU_MASK;
        if (cpu_id == CPU_CORTEX_A9) {
                /*
                 * Currently we can't call ioremap here because
index 025d173287307afdb5db4bc0b7a9679511629c11..35955b54944c1c5ed39bf051667b337aeaa7dee3 100644 (file)
@@ -43,7 +43,7 @@ config CPU_ARM740T
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_LV4T
-       select CPU_CACHE_V3     # although the core is v4t
+       select CPU_CACHE_V4
        select CPU_CP15_MPU
        select CPU_PABRT_LEGACY
        help
@@ -397,6 +397,13 @@ config CPU_V7
        select CPU_PABRT_V7
        select CPU_TLB_V7 if MMU
 
+config CPU_THUMBONLY
+       bool
+       # There are no CPUs available with MMU that don't implement an ARM ISA:
+       depends on !MMU
+       help
+         Select this if your CPU doesn't support the 32 bit ARM instructions.
+
 # Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
@@ -469,9 +476,6 @@ config CPU_PABRT_V7
        bool
 
 # The cache model
-config CPU_CACHE_V3
-       bool
-
 config CPU_CACHE_V4
        bool
 
@@ -608,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
        bool
 
 config ARM_THUMB
-       bool "Support Thumb user binaries"
+       bool "Support Thumb user binaries" if !CPU_THUMBONLY
        depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
        default y
        help
index 4e333fa2756f4de16b4005ca65e1eb44cff0c579..9e51be96f635b5945b978bed2c6978fd3d2b61ba 100644 (file)
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY)        += pabort-legacy.o
 obj-$(CONFIG_CPU_PABRT_V6)     += pabort-v6.o
 obj-$(CONFIG_CPU_PABRT_V7)     += pabort-v7.o
 
-obj-$(CONFIG_CPU_CACHE_V3)     += cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)     += cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)   += cache-v4wt.o
 obj-$(CONFIG_CPU_CACHE_V4WB)   += cache-v4wb.o
index db26e2e543f46d1ca0896d861c336df8e9a3f7d5..6f4585b89078749e39383d1aee50f51040624a74 100644 (file)
@@ -961,12 +961,14 @@ static int __init alignment_init(void)
                return -ENOMEM;
 #endif
 
+#ifdef CONFIG_CPU_CP15
        if (cpu_is_v6_unaligned()) {
                cr_alignment &= ~CR_A;
                cr_no_alignment &= ~CR_A;
                set_cr(cr_alignment);
                ai_usermode = safe_usermode(ai_usermode, false);
        }
+#endif
 
        hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");
index dd3d59122cc374c80ad487ad57bc3f5fbc580325..48bc3c0a87ce321cc2e37c257bf217dc085307e6 100644 (file)
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
        outer_cache.inv_range = feroceon_l2_inv_range;
        outer_cache.clean_range = feroceon_l2_clean_range;
        outer_cache.flush_range = feroceon_l2_flush_range;
+       outer_cache.inv_all = l2_inv_all;
 
        enable_l2();
 
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644 (file)
index 8a3fade..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *  linux/arch/arm/mm/cache-v3.S
- *
- *  Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- *     flush_icache_all()
- *
- *     Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
-       mov     pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- *     flush_user_cache_all()
- *
- *     Invalidate all cache entries in a particular address
- *     space.
- *
- *     - mm    - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
-       /* FALLTHROUGH */
-/*
- *     flush_kern_cache_all()
- *
- *     Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
-       /* FALLTHROUGH */
-
-/*
- *     flush_user_cache_range(start, end, flags)
- *
- *     Invalidate a range of cache entries in the specified
- *     address space.
- *
- *     - start - start address (may not be aligned)
- *     - end   - end address (exclusive, may not be aligned)
- *     - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
-       mov     ip, #0
-       mcreq   p15, 0, ip, c7, c0, 0           @ flush ID cache
-       mov     pc, lr
-
-/*
- *     coherent_kern_range(start, end)
- *
- *     Ensure coherency between the Icache and the Dcache in the
- *     region described by start.  If you have non-snooping
- *     Harvard caches, you need to implement this function.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
-       /* FALLTHROUGH */
-
-/*
- *     coherent_user_range(start, end)
- *
- *     Ensure coherency between the Icache and the Dcache in the
- *     region described by start.  If you have non-snooping
- *     Harvard caches, you need to implement this function.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_coherent_user_range)
-       mov     r0, #0
-       mov     pc, lr
-
-/*
- *     flush_kern_dcache_area(void *page, size_t size)
- *
- *     Ensure no D cache aliasing occurs, either with itself or
- *     the I cache
- *
- *     - addr  - kernel address
- *     - size  - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
-       /* FALLTHROUGH */
-
-/*
- *     dma_flush_range(start, end)
- *
- *     Clean and invalidate the specified virtual address range.
- *
- *     - start  - virtual start address
- *     - end    - virtual end address
- */
-ENTRY(v3_dma_flush_range)
-       mov     r0, #0
-       mcr     p15, 0, r0, c7, c0, 0           @ flush ID cache
-       mov     pc, lr
-
-/*
- *     dma_unmap_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
-       teq     r2, #DMA_TO_DEVICE
-       bne     v3_dma_flush_range
-       /* FALLTHROUGH */
-
-/*
- *     dma_map_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-ENTRY(v3_dma_map_area)
-       mov     pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
-       .globl  v3_flush_kern_cache_louis
-       .equ    v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v3
index 43e5d77be677a329f497f9829364f392b254fa75..a7ba68f59f0cd8564fa2a12b84e0e2b3f13b6860 100644 (file)
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
 ENTRY(v4_flush_user_cache_range)
 #ifdef CONFIG_CPU_CP15
        mov     ip, #0
-       mcreq   p15, 0, ip, c7, c7, 0           @ flush ID cache
+       mcr     p15, 0, ip, c7, c7, 0           @ flush ID cache
        mov     pc, lr
 #else
        /* FALLTHROUGH */
index e9db6b4bf65a158adf54bd4a799dbc7fee754c36..ef3e0f3aac96261d1c8e73671a00419c977902bc 100644 (file)
@@ -823,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                if (PageHighMem(page)) {
                        if (len + offset > PAGE_SIZE)
                                len = PAGE_SIZE - offset;
-                       vaddr = kmap_high_get(page);
-                       if (vaddr) {
-                               vaddr += offset;
-                               op(vaddr, len, dir);
-                               kunmap_high(page);
-                       } else if (cache_is_vipt()) {
-                               /* unmapped pages might still be cached */
+
+                       if (cache_is_vipt_nonaliasing()) {
                                vaddr = kmap_atomic(page);
                                op(vaddr + offset, len, dir);
                                kunmap_atomic(vaddr);
+                       } else {
+                               vaddr = kmap_high_get(page);
+                               if (vaddr) {
+                                       op(vaddr + offset, len, dir);
+                                       kunmap_high(page);
+                               }
                        }
                } else {
                        vaddr = page_address(page) + offset;
index 1c8f7f56417598303cac08ca3baa8353361366ff..0d473cce501c137e8c87c40f9ccb455d6ec275eb 100644 (file)
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
        if (!PageHighMem(page)) {
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        } else {
-               void *addr = kmap_high_get(page);
-               if (addr) {
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_high(page);
-               } else if (cache_is_vipt()) {
-                       /* unmapped pages might still be cached */
+               void *addr;
+
+               if (cache_is_vipt_nonaliasing()) {
                        addr = kmap_atomic(page);
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_atomic(addr);
+               } else {
+                       addr = kmap_high_get(page);
+                       if (addr) {
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                               kunmap_high(page);
+                       }
                }
        }
 
index 78978945492a2a105f1a9a2d18e81cd76dc1e7b6..e0d8565671a6c8104c643937098b963ac2f517ee 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/mach/pci.h>
 
 #include "mm.h"
+#include "tcm.h"
 
 /*
  * empty_zero_page is a special page that is used for
@@ -112,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {
        }
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -210,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+       pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+       pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
@@ -1277,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
        dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
+       tcm_init();
 
        top_pmd = pmd_off_k(0xffff0000);
 
index dc5de5d53f20c4865f6522d86fa96e7d9495071e..fde2d2a794cfbc0b43c31619fff9668172aebad3 100644 (file)
@@ -77,24 +77,27 @@ __arm740_setup:
        mcr     p15, 0, r0, c6, c0              @ set area 0, default
 
        ldr     r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-       ldr     r1, =(CONFIG_DRAM_SIZE >> 12)   @ size of RAM (must be >= 4KB)
-       mov     r2, #10                         @ 11 is the minimum (4KB)
-1:     add     r2, r2, #1                      @ area size *= 2
-       mov     r1, r1, lsr #1
+       ldr     r3, =(CONFIG_DRAM_SIZE >> 12)   @ size of RAM (must be >= 4KB)
+       mov     r4, #10                         @ 11 is the minimum (4KB)
+1:     add     r4, r4, #1                      @ area size *= 2
+       movs    r3, r3, lsr #1
        bne     1b                              @ count not zero r-shift
-       orr     r0, r0, r2, lsl #1              @ the area register value
+       orr     r0, r0, r4, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
        mcr     p15, 0, r0, c6, c1              @ set area 1, RAM
 
        ldr     r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-       ldr     r1, =(CONFIG_FLASH_SIZE >> 12)  @ size of FLASH (must be >= 4KB)
-       mov     r2, #10                         @ 11 is the minimum (4KB)
-1:     add     r2, r2, #1                      @ area size *= 2
-       mov     r1, r1, lsr #1
+       ldr     r3, =(CONFIG_FLASH_SIZE >> 12)  @ size of FLASH (must be >= 4KB)
+       cmp     r3, #0
+       moveq   r0, #0
+       beq     2f
+       mov     r4, #10                         @ 11 is the minimum (4KB)
+1:     add     r4, r4, #1                      @ area size *= 2
+       movs    r3, r3, lsr #1
        bne     1b                              @ count not zero r-shift
-       orr     r0, r0, r2, lsl #1              @ the area register value
+       orr     r0, r0, r4, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
-       mcr     p15, 0, r0, c6, c2              @ set area 2, ROM/FLASH
+2:     mcr     p15, 0, r0, c6, c2              @ set area 2, ROM/FLASH
 
        mov     r0, #0x06
        mcr     p15, 0, r0, c2, c0              @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
        .long   0x41807400
        .long   0xfffffff0
        .long   0
+       .long   0
        b       __arm740_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+       .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
        .long   cpu_arm740_name
        .long   arm740_processor_functions
        .long   0
        .long   0
-       .long   v3_cache_fns                    @ cache model
+       .long   v4_cache_fns                    @ cache model
        .size   __arm740_proc_info, . - __arm740_proc_info
index 2c3b9421ab5eca938dfe9289fc39472259ada3c1..2556cf1c2da1c8f40c09edbe7d43b7c0286f355f 100644 (file)
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
 .equ   cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm920_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index f1803f7e29728460965675b6722a978d0cc33dcd..344c8a548cc0ef95aea9653d883b26c629f016dd 100644 (file)
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
 .equ   cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm926_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 82f9cdc751d6421d01bcb4eca3de51dd211f1868..0b60dd3d742a1d909e4ffdf8d5162cd052754098 100644 (file)
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 
 .globl cpu_mohawk_suspend_size
 .equ   cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_mohawk_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 3aa0da11fd8473376405d6fe57b76b777ca1cf93..d92dfd081429294200875ddedfbd6beb4fd98735 100644 (file)
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl cpu_sa1100_suspend_size
 .equ   cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_sa1100_do_suspend)
        stmfd   sp!, {r4 - r6, lr}
        mrc     p15, 0, r4, c3, c0, 0           @ domain ID
index 3e6210b4d6d4cc713ba524ab3966760ad1d6f6f8..054b491ff7649ca067ff821770aec80a4da42102 100644 (file)
@@ -17,7 +17,9 @@
 
 #ifndef MULTI_CPU
 EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
 #else
 EXPORT_SYMBOL(processor);
 #endif
index bcaaa8de93250fb73dbab83b3b0d07a74ccd70fc..919405e20b80e73272a519a7c3b377b9eccc2dd0 100644 (file)
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
        mov     pc, lr
 
 ENTRY(cpu_v6_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #D_CACHE_LINE_SIZE
        subs    r1, r1, #D_CACHE_LINE_SIZE
        bhi     1b
-#endif
        mov     pc, lr
 
 /*
@@ -138,7 +136,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
 .equ   cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v6_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index 78f520bc0e99aabad65805521acc5e24e6b48a43..9704097c450e13300be619e73015c65b7bd08c4a 100644 (file)
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
  ARM(  str     r3, [r0, #2048]! )
  THUMB(        add     r0, r0, #2048 )
  THUMB(        str     r3, [r0] )
-       mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
+       ALT_SMP(mov     pc,lr)
+       ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
 #endif
        mov     pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
index 6ffd78c0f9abeeacf0295dd888a9b537e9249604..363027e811d6f5c803d7616c318d4aeab7bfb3e9 100644 (file)
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
        tst     r3, #1 << (55 - 32)             @ L_PTE_DIRTY
        orreq   r2, #L_PTE_RDONLY
 1:     strd    r2, r3, [r0]
-       mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
+       ALT_SMP(mov     pc, lr)
+       ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
 #endif
        mov     pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
index f584d3f5b37c5855782a0259c327bc6347bba89a..2c73a7301ff7017eea046df2f2b67e475ab653de 100644 (file)
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
+       ALT_SMP(mov     pc, lr)                 @ MP extensions imply L1 PTW
+       ALT_UP(W(nop))
        dcache_line_size r2, r3
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, r2
        subs    r1, r1, r2
        bhi     1b
        dsb
-#endif
        mov     pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:
        __v7_proc __v7_ca9mp_setup
        .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
 
+#endif /* CONFIG_ARM_LPAE */
+
        /*
         * Marvell PJ4B processor.
         */
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info:
        .long   0xfffffff0
        __v7_proc __v7_pj4b_setup
        .size   __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
-#endif /* CONFIG_ARM_LPAE */
 
        /*
         * ARM Ltd. Cortex A7 processor.
index eb93d6487f3598fcb0cc6e92c0f3e23faa36fb11..e8efd83b6f252f00b85b29eb45d24bbd4919d4ae 100644 (file)
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl cpu_xsc3_suspend_size
 .equ   cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xsc3_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 25510361aa181e7200d6f69a64d41ff6b8f8a8a6..e766f889bfd6d1b4159c6724a975468dbba56892 100644 (file)
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl cpu_xscale_suspend_size
 .equ   cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644 (file)
index 0000000..8015ad4
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
index 831e1fdfdb2faff4e6299cbd90f843c1ef2ddcc4..a10297da122b75b270432bd8a9d2fb627507687e 100644 (file)
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months.  References to machine_is_NAME() do not count!
 #
-# Last update: Thu Apr 26 08:44:23 2012
+# Last update: Fri Mar 22 17:24:50 2013
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -64,8 +64,8 @@ h7201                 ARCH_H7201              H7201                   161
 h7202                  ARCH_H7202              H7202                   162
 iq80321                        ARCH_IQ80321            IQ80321                 169
 ks8695                 ARCH_KS8695             KS8695                  180
-karo                   ARCH_KARO               KARO                    190
 smdk2410               ARCH_SMDK2410           SMDK2410                193
+ceiva                  ARCH_CEIVA              CEIVA                   200
 voiceblue              MACH_VOICEBLUE          VOICEBLUE               218
 h5400                  ARCH_H5400              H5400                   220
 omap_innovator         MACH_OMAP_INNOVATOR     OMAP_INNOVATOR          234
@@ -95,6 +95,7 @@ lpd7a400              MACH_LPD7A400           LPD7A400                389
 lpd7a404               MACH_LPD7A404           LPD7A404                390
 csb337                 MACH_CSB337             CSB337                  399
 mainstone              MACH_MAINSTONE          MAINSTONE               406
+lite300                        MACH_LITE300            LITE300                 408
 xcep                   MACH_XCEP               XCEP                    413
 arcom_vulcan           MACH_ARCOM_VULCAN       ARCOM_VULCAN            414
 nomadik                        MACH_NOMADIK            NOMADIK                 420
@@ -131,12 +132,14 @@ kb9200                    MACH_KB9200             KB9200                  612
 sx1                    MACH_SX1                SX1                     613
 ixdp465                        MACH_IXDP465            IXDP465                 618
 ixdp2351               MACH_IXDP2351           IXDP2351                619
+cm4008                 MACH_CM4008             CM4008                  624
 iq80332                        MACH_IQ80332            IQ80332                 629
 gtwx5715               MACH_GTWX5715           GTWX5715                641
 csb637                 MACH_CSB637             CSB637                  648
 n30                    MACH_N30                N30                     656
 nec_mp900              MACH_NEC_MP900          NEC_MP900               659
 kafa                   MACH_KAFA               KAFA                    662
+cm41xx                 MACH_CM41XX             CM41XX                  672
 ts72xx                 MACH_TS72XX             TS72XX                  673
 otom                   MACH_OTOM               OTOM                    680
 nexcoder_2440          MACH_NEXCODER_2440      NEXCODER_2440           681
@@ -149,6 +152,7 @@ colibri                     MACH_COLIBRI            COLIBRI                 729
 gateway7001            MACH_GATEWAY7001        GATEWAY7001             731
 pcm027                 MACH_PCM027             PCM027                  732
 anubis                 MACH_ANUBIS             ANUBIS                  734
+xboardgp8              MACH_XBOARDGP8          XBOARDGP8               742
 akita                  MACH_AKITA              AKITA                   744
 e330                   MACH_E330               E330                    753
 nokia770               MACH_NOKIA770           NOKIA770                755
@@ -157,9 +161,11 @@ edb9315a           MACH_EDB9315A           EDB9315A                772
 stargate2              MACH_STARGATE2          STARGATE2               774
 intelmote2             MACH_INTELMOTE2         INTELMOTE2              775
 trizeps4               MACH_TRIZEPS4           TRIZEPS4                776
+pnx4008                        MACH_PNX4008            PNX4008                 782
 cpuat91                        MACH_CPUAT91            CPUAT91                 787
 iq81340sc              MACH_IQ81340SC          IQ81340SC               799
 iq81340mc              MACH_IQ81340MC          IQ81340MC               801
+se4200                 MACH_SE4200             SE4200                  809
 micro9                 MACH_MICRO9             MICRO9                  811
 micro9l                        MACH_MICRO9L            MICRO9L                 812
 omap_palmte            MACH_OMAP_PALMTE        OMAP_PALMTE             817
@@ -178,6 +184,7 @@ mx21ads                     MACH_MX21ADS            MX21ADS                 851
 ams_delta              MACH_AMS_DELTA          AMS_DELTA               862
 nas100d                        MACH_NAS100D            NAS100D                 865
 magician               MACH_MAGICIAN           MAGICIAN                875
+cm4002                 MACH_CM4002             CM4002                  876
 nxdkn                  MACH_NXDKN              NXDKN                   880
 palmtx                 MACH_PALMTX             PALMTX                  885
 s3c2413                        MACH_S3C2413            S3C2413                 887
@@ -203,7 +210,6 @@ omap_fsample                MACH_OMAP_FSAMPLE       OMAP_FSAMPLE            970
 snapper_cl15           MACH_SNAPPER_CL15       SNAPPER_CL15            986
 omap_palmz71           MACH_OMAP_PALMZ71       OMAP_PALMZ71            993
 smdk2412               MACH_SMDK2412           SMDK2412                1009
-bkde303                        MACH_BKDE303            BKDE303                 1021
 smdk2413               MACH_SMDK2413           SMDK2413                1022
 aml_m5900              MACH_AML_M5900          AML_M5900               1024
 balloon3               MACH_BALLOON3           BALLOON3                1029
@@ -214,6 +220,7 @@ fsg                 MACH_FSG                FSG                     1091
 at91sam9260ek          MACH_AT91SAM9260EK      AT91SAM9260EK           1099
 glantank               MACH_GLANTANK           GLANTANK                1100
 n2100                  MACH_N2100              N2100                   1101
+im42xx                 MACH_IM42XX             IM42XX                  1105
 qt2410                 MACH_QT2410             QT2410                  1108
 kixrp435               MACH_KIXRP435           KIXRP435                1109
 cc9p9360dev            MACH_CC9P9360DEV        CC9P9360DEV             1114
@@ -247,6 +254,7 @@ csb726                      MACH_CSB726             CSB726                  1359
 davinci_dm6467_evm     MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM      1380
 davinci_dm355_evm      MACH_DAVINCI_DM355_EVM  DAVINCI_DM355_EVM       1381
 littleton              MACH_LITTLETON          LITTLETON               1388
+im4004                 MACH_IM4004             IM4004                  1400
 realview_pb11mp                MACH_REALVIEW_PB11MP    REALVIEW_PB11MP         1407
 mx27_3ds               MACH_MX27_3DS           MX27_3DS                1430
 halibut                        MACH_HALIBUT            HALIBUT                 1439
@@ -268,6 +276,7 @@ dns323                      MACH_DNS323             DNS323                  1542
 omap3_beagle           MACH_OMAP3_BEAGLE       OMAP3_BEAGLE            1546
 nokia_n810             MACH_NOKIA_N810         NOKIA_N810              1548
 pcm038                 MACH_PCM038             PCM038                  1551
+sg310                  MACH_SG310              SG310                   1564
 ts209                  MACH_TS209              TS209                   1565
 at91cap9adk            MACH_AT91CAP9ADK        AT91CAP9ADK             1566
 mx31moboard            MACH_MX31MOBOARD        MX31MOBOARD             1574
@@ -371,7 +380,6 @@ pcm043                      MACH_PCM043             PCM043                  2072
 sheevaplug             MACH_SHEEVAPLUG         SHEEVAPLUG              2097
 avengers_lite          MACH_AVENGERS_LITE      AVENGERS_LITE           2104
 mx51_babbage           MACH_MX51_BABBAGE       MX51_BABBAGE            2125
-tx37                   MACH_TX37               TX37                    2127
 rd78x00_masa           MACH_RD78X00_MASA       RD78X00_MASA            2135
 dm355_leopard          MACH_DM355_LEOPARD      DM355_LEOPARD           2138
 ts219                  MACH_TS219              TS219                   2139
@@ -380,12 +388,12 @@ davinci_da850_evm MACH_DAVINCI_DA850_EVM  DAVINCI_DA850_EVM       2157
 at91sam9g10ek          MACH_AT91SAM9G10EK      AT91SAM9G10EK           2159
 omap_4430sdp           MACH_OMAP_4430SDP       OMAP_4430SDP            2160
 magx_zn5               MACH_MAGX_ZN5           MAGX_ZN5                2162
-tx25                   MACH_TX25               TX25                    2177
 omap3_torpedo          MACH_OMAP3_TORPEDO      OMAP3_TORPEDO           2178
 anw6410                        MACH_ANW6410            ANW6410                 2183
 imx27_visstrim_m10     MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10      2187
 portuxg20              MACH_PORTUXG20          PORTUXG20               2191
 smdkc110               MACH_SMDKC110           SMDKC110                2193
+cabespresso            MACH_CABESPRESSO        CABESPRESSO             2194
 omap3517evm            MACH_OMAP3517EVM        OMAP3517EVM             2200
 netspace_v2            MACH_NETSPACE_V2        NETSPACE_V2             2201
 netspace_max_v2                MACH_NETSPACE_MAX_V2    NETSPACE_MAX_V2         2202
@@ -404,6 +412,7 @@ bigdisk                     MACH_BIGDISK            BIGDISK                 2283
 at91sam9g20ek_2mmc     MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC      2288
 bcmring                        MACH_BCMRING            BCMRING                 2289
 mahimahi               MACH_MAHIMAHI           MAHIMAHI                2304
+cerebric               MACH_CEREBRIC           CEREBRIC                2311
 smdk6442               MACH_SMDK6442           SMDK6442                2324
 openrd_base            MACH_OPENRD_BASE        OPENRD_BASE             2325
 devkit8000             MACH_DEVKIT8000         DEVKIT8000              2330
@@ -423,10 +432,10 @@ raumfeld_rc               MACH_RAUMFELD_RC        RAUMFELD_RC             2413
 raumfeld_connector     MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR      2414
 raumfeld_speaker       MACH_RAUMFELD_SPEAKER   RAUMFELD_SPEAKER        2415
 tnetv107x              MACH_TNETV107X          TNETV107X               2418
-mx51_m2id              MACH_MX51_M2ID          MX51_M2ID               2428
 smdkv210               MACH_SMDKV210           SMDKV210                2456
 omap_zoom3             MACH_OMAP_ZOOM3         OMAP_ZOOM3              2464
 omap_3630sdp           MACH_OMAP_3630SDP       OMAP_3630SDP            2465
+cybook2440             MACH_CYBOOK2440         CYBOOK2440              2466
 smartq7                        MACH_SMARTQ7            SMARTQ7                 2479
 watson_efm_plugin      MACH_WATSON_EFM_PLUGIN  WATSON_EFM_PLUGIN       2491
 g4evm                  MACH_G4EVM              G4EVM                   2493
@@ -434,12 +443,10 @@ omapl138_hawkboard        MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD      2495
 ts41x                  MACH_TS41X              TS41X                   2502
 phy3250                        MACH_PHY3250            PHY3250                 2511
 mini6410               MACH_MINI6410           MINI6410                2520
-tx51                   MACH_TX51               TX51                    2529
 mx28evk                        MACH_MX28EVK            MX28EVK                 2531
 smartq5                        MACH_SMARTQ5            SMARTQ5                 2534
 davinci_dm6467tevm     MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM      2548
 mxt_td60               MACH_MXT_TD60           MXT_TD60                2550
-pca101                 MACH_PCA101             PCA101                  2595
 capc7117               MACH_CAPC7117           CAPC7117                2612
 icontrol               MACH_ICONTROL           ICONTROL                2624
 gplugd                 MACH_GPLUGD             GPLUGD                  2625
@@ -465,6 +472,7 @@ igep0030            MACH_IGEP0030           IGEP0030                2717
 sbc3530                        MACH_SBC3530            SBC3530                 2722
 saarb                  MACH_SAARB              SAARB                   2727
 harmony                        MACH_HARMONY            HARMONY                 2731
+cybook_orizon          MACH_CYBOOK_ORIZON      CYBOOK_ORIZON           2733
 msm7x30_fluid          MACH_MSM7X30_FLUID      MSM7X30_FLUID           2741
 cm_t3517               MACH_CM_T3517           CM_T3517                2750
 wbd222                 MACH_WBD222             WBD222                  2753
@@ -480,10 +488,8 @@ eukrea_cpuimx35sd  MACH_EUKREA_CPUIMX35SD  EUKREA_CPUIMX35SD       2821
 eukrea_cpuimx51sd      MACH_EUKREA_CPUIMX51SD  EUKREA_CPUIMX51SD       2822
 eukrea_cpuimx51                MACH_EUKREA_CPUIMX51    EUKREA_CPUIMX51         2823
 smdkc210               MACH_SMDKC210           SMDKC210                2838
-pcaal1                 MACH_PCAAL1             PCAAL1                  2843
 t5325                  MACH_T5325              T5325                   2846
 income                 MACH_INCOME             INCOME                  2849
-mx257sx                        MACH_MX257SX            MX257SX                 2861
 goni                   MACH_GONI               GONI                    2862
 bv07                   MACH_BV07               BV07                    2882
 openrd_ultimate                MACH_OPENRD_ULTIMATE    OPENRD_ULTIMATE         2884
@@ -491,7 +497,6 @@ devixp                      MACH_DEVIXP             DEVIXP                  2885
 miccpt                 MACH_MICCPT             MICCPT                  2886
 mic256                 MACH_MIC256             MIC256                  2887
 u5500                  MACH_U5500              U5500                   2890
-pov15hd                        MACH_POV15HD            POV15HD                 2910
 linkstation_lschl      MACH_LINKSTATION_LSCHL  LINKSTATION_LSCHL       2913
 smdkv310               MACH_SMDKV310           SMDKV310                2925
 wm8505_7in_netbook     MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK      2928
@@ -518,7 +523,6 @@ prima2_evb          MACH_PRIMA2_EVB         PRIMA2_EVB              3103
 paz00                  MACH_PAZ00              PAZ00                   3128
 acmenetusfoxg20                MACH_ACMENETUSFOXG20    ACMENETUSFOXG20         3129
 ag5evm                 MACH_AG5EVM             AG5EVM                  3189
-tsunagi                        MACH_TSUNAGI            TSUNAGI                 3197
 ics_if_voip            MACH_ICS_IF_VOIP        ICS_IF_VOIP             3206
 wlf_cragg_6410         MACH_WLF_CRAGG_6410     WLF_CRAGG_6410          3207
 trimslice              MACH_TRIMSLICE          TRIMSLICE               3209
@@ -529,8 +533,6 @@ msm8960_sim         MACH_MSM8960_SIM        MSM8960_SIM             3230
 msm8960_rumi3          MACH_MSM8960_RUMI3      MSM8960_RUMI3           3231
 gsia18s                        MACH_GSIA18S            GSIA18S                 3234
 mx53_loco              MACH_MX53_LOCO          MX53_LOCO               3273
-tx53                   MACH_TX53               TX53                    3279
-encore                 MACH_ENCORE             ENCORE                  3284
 wario                  MACH_WARIO              WARIO                   3288
 cm_t3730               MACH_CM_T3730           CM_T3730                3290
 hrefv60                        MACH_HREFV60            HREFV60                 3293
@@ -538,603 +540,24 @@ armlex4210               MACH_ARMLEX4210         ARMLEX4210              3361
 snowball               MACH_SNOWBALL           SNOWBALL                3363
 xilinx_ep107           MACH_XILINX_EP107       XILINX_EP107            3378
 nuri                   MACH_NURI               NURI                    3379
-wtplug                 MACH_WTPLUG             WTPLUG                  3412
-veridis_a300           MACH_VERIDIS_A300       VERIDIS_A300            3448
 origen                 MACH_ORIGEN             ORIGEN                  3455
-wm8650refboard         MACH_WM8650REFBOARD     WM8650REFBOARD          3472
-xarina                 MACH_XARINA             XARINA                  3476
-sdvr                   MACH_SDVR               SDVR                    3478
-acer_maya              MACH_ACER_MAYA          ACER_MAYA               3479
-pico                   MACH_PICO               PICO                    3480
-cwmx233                        MACH_CWMX233            CWMX233                 3481
-cwam1808               MACH_CWAM1808           CWAM1808                3482
-cwdm365                        MACH_CWDM365            CWDM365                 3483
-mx51_moray             MACH_MX51_MORAY         MX51_MORAY              3484
-thales_cbc             MACH_THALES_CBC         THALES_CBC              3485
-bluepoint              MACH_BLUEPOINT          BLUEPOINT               3486
-dir665                 MACH_DIR665             DIR665                  3487
-acmerover1             MACH_ACMEROVER1         ACMEROVER1              3488
-shooter_ct             MACH_SHOOTER_CT         SHOOTER_CT              3489
-bliss                  MACH_BLISS              BLISS                   3490
-blissc                 MACH_BLISSC             BLISSC                  3491
-thales_adc             MACH_THALES_ADC         THALES_ADC              3492
-ubisys_p9d_evp         MACH_UBISYS_P9D_EVP     UBISYS_P9D_EVP          3493
-atdgp318               MACH_ATDGP318           ATDGP318                3494
-dma210u                        MACH_DMA210U            DMA210U                 3495
-em_t3                  MACH_EM_T3              EM_T3                   3496
-htx3250                        MACH_HTX3250            HTX3250                 3497
-g50                    MACH_G50                G50                     3498
-eco5                   MACH_ECO5               ECO5                    3499
-wintergrasp            MACH_WINTERGRASP        WINTERGRASP             3500
-puro                   MACH_PURO               PURO                    3501
-shooter_k              MACH_SHOOTER_K          SHOOTER_K               3502
 nspire                 MACH_NSPIRE             NSPIRE                  3503
-mickxx                 MACH_MICKXX             MICKXX                  3504
-lxmb                   MACH_LXMB               LXMB                    3505
-adam                   MACH_ADAM               ADAM                    3507
-b1004                  MACH_B1004              B1004                   3508
-oboea                  MACH_OBOEA              OBOEA                   3509
-a1015                  MACH_A1015              A1015                   3510
-robin_vbdt30           MACH_ROBIN_VBDT30       ROBIN_VBDT30            3511
-tegra_enterprise       MACH_TEGRA_ENTERPRISE   TEGRA_ENTERPRISE        3512
-rfl108200_mk10         MACH_RFL108200_MK10     RFL108200_MK10          3513
-rfl108300_mk16         MACH_RFL108300_MK16     RFL108300_MK16          3514
-rover_v7               MACH_ROVER_V7           ROVER_V7                3515
-miphone                        MACH_MIPHONE            MIPHONE                 3516
-femtobts               MACH_FEMTOBTS           FEMTOBTS                3517
-monopoli               MACH_MONOPOLI           MONOPOLI                3518
-boss                   MACH_BOSS               BOSS                    3519
-davinci_dm368_vtam     MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM      3520
-clcon                  MACH_CLCON              CLCON                   3521
 nokia_rm696            MACH_NOKIA_RM696        NOKIA_RM696             3522
-tahiti                 MACH_TAHITI             TAHITI                  3523
-fighter                        MACH_FIGHTER            FIGHTER                 3524
-sgh_i710               MACH_SGH_I710           SGH_I710                3525
-integreproscb          MACH_INTEGREPROSCB      INTEGREPROSCB           3526
-monza                  MACH_MONZA              MONZA                   3527
-calimain               MACH_CALIMAIN           CALIMAIN                3528
-mx6q_sabreauto         MACH_MX6Q_SABREAUTO     MX6Q_SABREAUTO          3529
-gma01x                 MACH_GMA01X             GMA01X                  3530
-sbc51                  MACH_SBC51              SBC51                   3531
-fit                    MACH_FIT                FIT                     3532
-steelhead              MACH_STEELHEAD          STEELHEAD               3533
-panther                        MACH_PANTHER            PANTHER                 3534
-msm8960_liquid         MACH_MSM8960_LIQUID     MSM8960_LIQUID          3535
-lexikonct              MACH_LEXIKONCT          LEXIKONCT               3536
-ns2816_stb             MACH_NS2816_STB         NS2816_STB              3537
-sei_mm2_lpc3250                MACH_SEI_MM2_LPC3250    SEI_MM2_LPC3250         3538
-cmimx53                        MACH_CMIMX53            CMIMX53                 3539
-sandwich               MACH_SANDWICH           SANDWICH                3540
-chief                  MACH_CHIEF              CHIEF                   3541
-pogo_e02               MACH_POGO_E02           POGO_E02                3542
 mikrap_x168            MACH_MIKRAP_X168        MIKRAP_X168             3543
-htcmozart              MACH_HTCMOZART          HTCMOZART               3544
-htcgold                        MACH_HTCGOLD            HTCGOLD                 3545
-mt72xx                 MACH_MT72XX             MT72XX                  3546
-mx51_ivy               MACH_MX51_IVY           MX51_IVY                3547
-mx51_lvd               MACH_MX51_LVD           MX51_LVD                3548
-omap3_wiser2           MACH_OMAP3_WISER2       OMAP3_WISER2            3549
-dreamplug              MACH_DREAMPLUG          DREAMPLUG               3550
-cobas_c_111            MACH_COBAS_C_111        COBAS_C_111             3551
-cobas_u_411            MACH_COBAS_U_411        COBAS_U_411             3552
-hssd                   MACH_HSSD               HSSD                    3553
-iom35x                 MACH_IOM35X             IOM35X                  3554
-psom_omap              MACH_PSOM_OMAP          PSOM_OMAP               3555
-iphone_2g              MACH_IPHONE_2G          IPHONE_2G               3556
-iphone_3g              MACH_IPHONE_3G          IPHONE_3G               3557
-ipod_touch_1g          MACH_IPOD_TOUCH_1G      IPOD_TOUCH_1G           3558
-pharos_tpc             MACH_PHAROS_TPC         PHAROS_TPC              3559
-mx53_hydra             MACH_MX53_HYDRA         MX53_HYDRA              3560
-ns2816_dev_board       MACH_NS2816_DEV_BOARD   NS2816_DEV_BOARD        3561
-iphone_3gs             MACH_IPHONE_3GS         IPHONE_3GS              3562
-iphone_4               MACH_IPHONE_4           IPHONE_4                3563
-ipod_touch_4g          MACH_IPOD_TOUCH_4G      IPOD_TOUCH_4G           3564
-dragon_e1100           MACH_DRAGON_E1100       DRAGON_E1100            3565
-topside                        MACH_TOPSIDE            TOPSIDE                 3566
-irisiii                        MACH_IRISIII            IRISIII                 3567
 deto_macarm9           MACH_DETO_MACARM9       DETO_MACARM9            3568
-eti_d1                 MACH_ETI_D1             ETI_D1                  3569
-som3530sdk             MACH_SOM3530SDK         SOM3530SDK              3570
-oc_engine              MACH_OC_ENGINE          OC_ENGINE               3571
-apq8064_sim            MACH_APQ8064_SIM        APQ8064_SIM             3572
-alps                   MACH_ALPS               ALPS                    3575
-tny_t3730              MACH_TNY_T3730          TNY_T3730               3576
-geryon_nfe             MACH_GERYON_NFE         GERYON_NFE              3577
-ns2816_ref_board       MACH_NS2816_REF_BOARD   NS2816_REF_BOARD        3578
-silverstone            MACH_SILVERSTONE        SILVERSTONE             3579
-mtt2440                        MACH_MTT2440            MTT2440                 3580
-ynicdb                 MACH_YNICDB             YNICDB                  3581
-bct                    MACH_BCT                BCT                     3582
-tuscan                 MACH_TUSCAN             TUSCAN                  3583
-xbt_sam9g45            MACH_XBT_SAM9G45        XBT_SAM9G45             3584
-enbw_cmc               MACH_ENBW_CMC           ENBW_CMC                3585
-ch104mx257             MACH_CH104MX257         CH104MX257              3587
-openpri                        MACH_OPENPRI            OPENPRI                 3588
-am335xevm              MACH_AM335XEVM          AM335XEVM               3589
-picodmb                        MACH_PICODMB            PICODMB                 3590
-waluigi                        MACH_WALUIGI            WALUIGI                 3591
-punicag7               MACH_PUNICAG7           PUNICAG7                3592
-ipad_1g                        MACH_IPAD_1G            IPAD_1G                 3593
-appletv_2g             MACH_APPLETV_2G         APPLETV_2G              3594
-mach_ecog45            MACH_MACH_ECOG45        MACH_ECOG45             3595
-ait_cam_enc_4xx                MACH_AIT_CAM_ENC_4XX    AIT_CAM_ENC_4XX         3596
-runnymede              MACH_RUNNYMEDE          RUNNYMEDE               3597
-play                   MACH_PLAY               PLAY                    3598
-hw90260                        MACH_HW90260            HW90260                 3599
-tagh                   MACH_TAGH               TAGH                    3600
-filbert                        MACH_FILBERT            FILBERT                 3601
-getinge_netcomv3       MACH_GETINGE_NETCOMV3   GETINGE_NETCOMV3        3602
-cw20                   MACH_CW20               CW20                    3603
-cinema                 MACH_CINEMA             CINEMA                  3604
-cinema_tea             MACH_CINEMA_TEA         CINEMA_TEA              3605
-cinema_coffee          MACH_CINEMA_COFFEE      CINEMA_COFFEE           3606
-cinema_juice           MACH_CINEMA_JUICE       CINEMA_JUICE            3607
-mx53_mirage2           MACH_MX53_MIRAGE2       MX53_MIRAGE2            3609
-mx53_efikasb           MACH_MX53_EFIKASB       MX53_EFIKASB            3610
-stm_b2000              MACH_STM_B2000          STM_B2000               3612
 m28evk                 MACH_M28EVK             M28EVK                  3613
-pda                    MACH_PDA                PDA                     3614
-meraki_mr58            MACH_MERAKI_MR58        MERAKI_MR58             3615
 kota2                  MACH_KOTA2              KOTA2                   3616
-letcool                        MACH_LETCOOL            LETCOOL                 3617
-mx27iat                        MACH_MX27IAT            MX27IAT                 3618
-apollo_td              MACH_APOLLO_TD          APOLLO_TD               3619
-arena                  MACH_ARENA              ARENA                   3620
-gsngateway             MACH_GSNGATEWAY         GSNGATEWAY              3621
-lf2000                 MACH_LF2000             LF2000                  3622
 bonito                 MACH_BONITO             BONITO                  3623
-asymptote              MACH_ASYMPTOTE          ASYMPTOTE               3624
-bst2brd                        MACH_BST2BRD            BST2BRD                 3625
-tx335s                 MACH_TX335S             TX335S                  3626
-pelco_tesla            MACH_PELCO_TESLA        PELCO_TESLA             3627
-rrhtestplat            MACH_RRHTESTPLAT        RRHTESTPLAT             3628
-vidtonic_pro           MACH_VIDTONIC_PRO       VIDTONIC_PRO            3629
-pl_apollo              MACH_PL_APOLLO          PL_APOLLO               3630
-pl_phoenix             MACH_PL_PHOENIX         PL_PHOENIX              3631
-m28cu3                 MACH_M28CU3             M28CU3                  3632
-vvbox_hd               MACH_VVBOX_HD           VVBOX_HD                3633
-coreware_sam9260_      MACH_COREWARE_SAM9260_  COREWARE_SAM9260_       3634
-marmaduke              MACH_MARMADUKE          MARMADUKE               3635
-amg_xlcore_camera      MACH_AMG_XLCORE_CAMERA  AMG_XLCORE_CAMERA       3636
 omap3_egf              MACH_OMAP3_EGF          OMAP3_EGF               3637
 smdk4212               MACH_SMDK4212           SMDK4212                3638
-dnp9200                        MACH_DNP9200            DNP9200                 3639
-tf101                  MACH_TF101              TF101                   3640
-omap3silvio            MACH_OMAP3SILVIO        OMAP3SILVIO             3641
-picasso2               MACH_PICASSO2           PICASSO2                3642
-vangogh2               MACH_VANGOGH2           VANGOGH2                3643
-olpc_xo_1_75           MACH_OLPC_XO_1_75       OLPC_XO_1_75            3644
-gx400                  MACH_GX400              GX400                   3645
-gs300                  MACH_GS300              GS300                   3646
-acer_a9                        MACH_ACER_A9            ACER_A9                 3647
-vivow_evm              MACH_VIVOW_EVM          VIVOW_EVM               3648
-veloce_cxq             MACH_VELOCE_CXQ         VELOCE_CXQ              3649
-veloce_cxm             MACH_VELOCE_CXM         VELOCE_CXM              3650
-p1852                  MACH_P1852              P1852                   3651
-naxy100                        MACH_NAXY100            NAXY100                 3652
-taishan                        MACH_TAISHAN            TAISHAN                 3653
-touchlink              MACH_TOUCHLINK          TOUCHLINK               3654
-stm32f103ze            MACH_STM32F103ZE        STM32F103ZE             3655
-mcx                    MACH_MCX                MCX                     3656
-stm_nmhdk_fli7610      MACH_STM_NMHDK_FLI7610  STM_NMHDK_FLI7610       3657
-top28x                 MACH_TOP28X             TOP28X                  3658
-okl4vp_microvisor      MACH_OKL4VP_MICROVISOR  OKL4VP_MICROVISOR       3659
-pop                    MACH_POP                POP                     3660
-layer                  MACH_LAYER              LAYER                   3661
-trondheim              MACH_TRONDHEIM          TRONDHEIM               3662
-eva                    MACH_EVA                EVA                     3663
-trust_taurus           MACH_TRUST_TAURUS       TRUST_TAURUS            3664
-ns2816_huashan         MACH_NS2816_HUASHAN     NS2816_HUASHAN          3665
-ns2816_yangcheng       MACH_NS2816_YANGCHENG   NS2816_YANGCHENG        3666
-p852                   MACH_P852               P852                    3667
-flea3                  MACH_FLEA3              FLEA3                   3668
-bowfin                 MACH_BOWFIN             BOWFIN                  3669
-mv88de3100             MACH_MV88DE3100         MV88DE3100              3670
-pia_am35x              MACH_PIA_AM35X          PIA_AM35X               3671
-cedar                  MACH_CEDAR              CEDAR                   3672
-picasso_e              MACH_PICASSO_E          PICASSO_E               3673
-samsung_e60            MACH_SAMSUNG_E60        SAMSUNG_E60             3674
-sdvr_mini              MACH_SDVR_MINI          SDVR_MINI               3676
-omap3_ij3k             MACH_OMAP3_IJ3K         OMAP3_IJ3K              3677
-modasmc1               MACH_MODASMC1           MODASMC1                3678
-apq8064_rumi3          MACH_APQ8064_RUMI3      APQ8064_RUMI3           3679
-matrix506              MACH_MATRIX506          MATRIX506               3680
-msm9615_mtp            MACH_MSM9615_MTP        MSM9615_MTP             3681
-dm36x_spawndc          MACH_DM36X_SPAWNDC      DM36X_SPAWNDC           3682
-sff792                 MACH_SFF792             SFF792                  3683
-am335xiaevm            MACH_AM335XIAEVM        AM335XIAEVM             3684
-g3c2440                        MACH_G3C2440            G3C2440                 3685
-tion270                        MACH_TION270            TION270                 3686
-w22q7arm02             MACH_W22Q7ARM02         W22Q7ARM02              3687
-omap_cat               MACH_OMAP_CAT           OMAP_CAT                3688
-at91sam9n12ek          MACH_AT91SAM9N12EK      AT91SAM9N12EK           3689
-morrison               MACH_MORRISON           MORRISON                3690
-svdu                   MACH_SVDU               SVDU                    3691
-lpp01                  MACH_LPP01              LPP01                   3692
-ubc283                 MACH_UBC283             UBC283                  3693
-zeppelin               MACH_ZEPPELIN           ZEPPELIN                3694
-motus                  MACH_MOTUS              MOTUS                   3695
-neomainboard           MACH_NEOMAINBOARD       NEOMAINBOARD            3696
-devkit3250             MACH_DEVKIT3250         DEVKIT3250              3697
-devkit7000             MACH_DEVKIT7000         DEVKIT7000              3698
-fmc_uic                        MACH_FMC_UIC            FMC_UIC                 3699
-fmc_dcm                        MACH_FMC_DCM            FMC_DCM                 3700
-batwm                  MACH_BATWM              BATWM                   3701
-atlas6cb               MACH_ATLAS6CB           ATLAS6CB                3702
-blue                   MACH_BLUE               BLUE                    3705
-colorado               MACH_COLORADO           COLORADO                3706
-popc                   MACH_POPC               POPC                    3707
-promwad_jade           MACH_PROMWAD_JADE       PROMWAD_JADE            3708
-amp                    MACH_AMP                AMP                     3709
-gnet_amp               MACH_GNET_AMP           GNET_AMP                3710
-toques                 MACH_TOQUES             TOQUES                  3711
 apx4devkit             MACH_APX4DEVKIT         APX4DEVKIT              3712
-dct_storm              MACH_DCT_STORM          DCT_STORM               3713
-owl                    MACH_OWL                OWL                     3715
-cogent_csb1741         MACH_COGENT_CSB1741     COGENT_CSB1741          3716
-adillustra610          MACH_ADILLUSTRA610      ADILLUSTRA610           3718
-ecafe_na04             MACH_ECAFE_NA04         ECAFE_NA04              3719
-popct                  MACH_POPCT              POPCT                   3720
-omap3_helena           MACH_OMAP3_HELENA       OMAP3_HELENA            3721
-ach                    MACH_ACH                ACH                     3722
-module_dtb             MACH_MODULE_DTB         MODULE_DTB              3723
-oslo_elisabeth         MACH_OSLO_ELISABETH     OSLO_ELISABETH          3725
-tt01                   MACH_TT01               TT01                    3726
-msm8930_cdp            MACH_MSM8930_CDP        MSM8930_CDP             3727
-msm8930_mtp            MACH_MSM8930_MTP        MSM8930_MTP             3728
-msm8930_fluid          MACH_MSM8930_FLUID      MSM8930_FLUID           3729
-ltu11                  MACH_LTU11              LTU11                   3730
-am1808_spawnco         MACH_AM1808_SPAWNCO     AM1808_SPAWNCO          3731
-flx6410                        MACH_FLX6410            FLX6410                 3732
-mx6q_qsb               MACH_MX6Q_QSB           MX6Q_QSB                3733
-mx53_plt424            MACH_MX53_PLT424        MX53_PLT424             3734
-jasmine                        MACH_JASMINE            JASMINE                 3735
-l138_owlboard_plus     MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS      3736
-wr21                   MACH_WR21               WR21                    3737
-peaboy                 MACH_PEABOY             PEABOY                  3739
-mx28_plato             MACH_MX28_PLATO         MX28_PLATO              3740
-kacom2                 MACH_KACOM2             KACOM2                  3741
-slco                   MACH_SLCO               SLCO                    3742
-imx51pico              MACH_IMX51PICO          IMX51PICO               3743
-glink1                 MACH_GLINK1             GLINK1                  3744
-diamond                        MACH_DIAMOND            DIAMOND                 3745
-d9000                  MACH_D9000              D9000                   3746
-w5300e01               MACH_W5300E01           W5300E01                3747
-im6000                 MACH_IM6000             IM6000                  3748
-mx51_fred51            MACH_MX51_FRED51        MX51_FRED51             3749
-stm32f2                        MACH_STM32F2            STM32F2                 3750
-ville                  MACH_VILLE              VILLE                   3751
-ptip_murnau            MACH_PTIP_MURNAU        PTIP_MURNAU             3752
-ptip_classic           MACH_PTIP_CLASSIC       PTIP_CLASSIC            3753
-mx53grb                        MACH_MX53GRB            MX53GRB                 3754
-gagarin                        MACH_GAGARIN            GAGARIN                 3755
-nas2big                        MACH_NAS2BIG            NAS2BIG                 3757
-superfemto             MACH_SUPERFEMTO         SUPERFEMTO              3758
-teufel                 MACH_TEUFEL             TEUFEL                  3759
-dinara                 MACH_DINARA             DINARA                  3760
-vanquish               MACH_VANQUISH           VANQUISH                3761
-zipabox1               MACH_ZIPABOX1           ZIPABOX1                3762
-u9540                  MACH_U9540              U9540                   3763
-jet                    MACH_JET                JET                     3764
 smdk4412               MACH_SMDK4412           SMDK4412                3765
-elite                  MACH_ELITE              ELITE                   3766
-spear320_hmi           MACH_SPEAR320_HMI       SPEAR320_HMI            3767
-ontario                        MACH_ONTARIO            ONTARIO                 3768
-mx6q_sabrelite         MACH_MX6Q_SABRELITE     MX6Q_SABRELITE          3769
-vc200                  MACH_VC200              VC200                   3770
-msm7625a_ffa           MACH_MSM7625A_FFA       MSM7625A_FFA            3771
-msm7625a_surf          MACH_MSM7625A_SURF      MSM7625A_SURF           3772
-benthossbp             MACH_BENTHOSSBP         BENTHOSSBP              3773
-smdk5210               MACH_SMDK5210           SMDK5210                3774
-empq2300               MACH_EMPQ2300           EMPQ2300                3775
-minipos                        MACH_MINIPOS            MINIPOS                 3776
-omap5_sevm             MACH_OMAP5_SEVM         OMAP5_SEVM              3777
-shelter                        MACH_SHELTER            SHELTER                 3778
-omap3_devkit8500       MACH_OMAP3_DEVKIT8500   OMAP3_DEVKIT8500        3779
-edgetd                 MACH_EDGETD             EDGETD                  3780
-copperyard             MACH_COPPERYARD         COPPERYARD              3781
-edge_u                 MACH_EDGE_U             EDGE_U                  3783
-edge_td                        MACH_EDGE_TD            EDGE_TD                 3784
-wdss                   MACH_WDSS               WDSS                    3785
-dl_pb25                        MACH_DL_PB25            DL_PB25                 3786
-dss11                  MACH_DSS11              DSS11                   3787
-cpa                    MACH_CPA                CPA                     3788
-aptp2000               MACH_APTP2000           APTP2000                3789
 marzen                 MACH_MARZEN             MARZEN                  3790
-st_turbine             MACH_ST_TURBINE         ST_TURBINE              3791
-gtl_it3300             MACH_GTL_IT3300         GTL_IT3300              3792
-mx6_mule               MACH_MX6_MULE           MX6_MULE                3793
-v7pxa_dt               MACH_V7PXA_DT           V7PXA_DT                3794
-v7mmp_dt               MACH_V7MMP_DT           V7MMP_DT                3795
-dragon7                        MACH_DRAGON7            DRAGON7                 3796
 krome                  MACH_KROME              KROME                   3797
-oratisdante            MACH_ORATISDANTE        ORATISDANTE             3798
-fathom                 MACH_FATHOM             FATHOM                  3799
-dns325                 MACH_DNS325             DNS325                  3800
-sarnen                 MACH_SARNEN             SARNEN                  3801
-ubisys_g1              MACH_UBISYS_G1          UBISYS_G1               3802
-mx53_pf1               MACH_MX53_PF1           MX53_PF1                3803
-asanti                 MACH_ASANTI             ASANTI                  3804
-volta                  MACH_VOLTA              VOLTA                   3805
-knight                 MACH_KNIGHT             KNIGHT                  3807
-beaglebone             MACH_BEAGLEBONE         BEAGLEBONE              3808
-becker                 MACH_BECKER             BECKER                  3809
-fc360                  MACH_FC360              FC360                   3810
-pmi2_xls               MACH_PMI2_XLS           PMI2_XLS                3811
-taranto                        MACH_TARANTO            TARANTO                 3812
-plutux                 MACH_PLUTUX             PLUTUX                  3813
-ipmp_medcom            MACH_IPMP_MEDCOM        IPMP_MEDCOM             3814
-absolut                        MACH_ABSOLUT            ABSOLUT                 3815
-awpb3                  MACH_AWPB3              AWPB3                   3816
-nfp32xx_dt             MACH_NFP32XX_DT         NFP32XX_DT              3817
-dl_pb53                        MACH_DL_PB53            DL_PB53                 3818
-acu_ii                 MACH_ACU_II             ACU_II                  3819
-avalon                 MACH_AVALON             AVALON                  3820
-sphinx                 MACH_SPHINX             SPHINX                  3821
-titan_t                        MACH_TITAN_T            TITAN_T                 3822
-harvest_boris          MACH_HARVEST_BORIS      HARVEST_BORIS           3823
-mach_msm7x30_m3s       MACH_MACH_MSM7X30_M3S   MACH_MSM7X30_M3S        3824
-smdk5250               MACH_SMDK5250           SMDK5250                3825
-imxt_lite              MACH_IMXT_LITE          IMXT_LITE               3826
-imxt_std               MACH_IMXT_STD           IMXT_STD                3827
-imxt_log               MACH_IMXT_LOG           IMXT_LOG                3828
-imxt_nav               MACH_IMXT_NAV           IMXT_NAV                3829
-imxt_full              MACH_IMXT_FULL          IMXT_FULL               3830
-ag09015                        MACH_AG09015            AG09015                 3831
-am3517_mt_ventoux      MACH_AM3517_MT_VENTOUX  AM3517_MT_VENTOUX       3832
-dp1arm9                        MACH_DP1ARM9            DP1ARM9                 3833
-picasso_m              MACH_PICASSO_M          PICASSO_M               3834
-video_gadget           MACH_VIDEO_GADGET       VIDEO_GADGET            3835
-mtt_om3x               MACH_MTT_OM3X           MTT_OM3X                3836
-mx6q_arm2              MACH_MX6Q_ARM2          MX6Q_ARM2               3837
-picosam9g45            MACH_PICOSAM9G45        PICOSAM9G45             3838
-vpm_dm365              MACH_VPM_DM365          VPM_DM365               3839
-bonfire                        MACH_BONFIRE            BONFIRE                 3840
-mt2p2d                 MACH_MT2P2D             MT2P2D                  3841
-sigpda01               MACH_SIGPDA01           SIGPDA01                3842
-cn27                   MACH_CN27               CN27                    3843
-mx25_cwtap             MACH_MX25_CWTAP         MX25_CWTAP              3844
-apf28                  MACH_APF28              APF28                   3845
-pelco_maxwell          MACH_PELCO_MAXWELL      PELCO_MAXWELL           3846
-ge_phoenix             MACH_GE_PHOENIX         GE_PHOENIX              3847
-empc_a500              MACH_EMPC_A500          EMPC_A500               3848
-ims_arm9               MACH_IMS_ARM9           IMS_ARM9                3849
-mini2416               MACH_MINI2416           MINI2416                3850
-mini2450               MACH_MINI2450           MINI2450                3851
-mini310                        MACH_MINI310            MINI310                 3852
-spear_hurricane                MACH_SPEAR_HURRICANE    SPEAR_HURRICANE         3853
-mt7208                 MACH_MT7208             MT7208                  3854
-lpc178x                        MACH_LPC178X            LPC178X                 3855
-farleys                        MACH_FARLEYS            FARLEYS                 3856
-efm32gg_dk3750         MACH_EFM32GG_DK3750     EFM32GG_DK3750          3857
-zeus_board             MACH_ZEUS_BOARD         ZEUS_BOARD              3858
-cc51                   MACH_CC51               CC51                    3859
-fxi_c210               MACH_FXI_C210           FXI_C210                3860
-msm8627_cdp            MACH_MSM8627_CDP        MSM8627_CDP             3861
-msm8627_mtp            MACH_MSM8627_MTP        MSM8627_MTP             3862
 armadillo800eva                MACH_ARMADILLO800EVA    ARMADILLO800EVA         3863
-primou                 MACH_PRIMOU             PRIMOU                  3864
-primoc                 MACH_PRIMOC             PRIMOC                  3865
-primoct                        MACH_PRIMOCT            PRIMOCT                 3866
-a9500                  MACH_A9500              A9500                   3867
-pluto                  MACH_PLUTO              PLUTO                   3869
-acfx100                        MACH_ACFX100            ACFX100                 3870
-msm8625_rumi3          MACH_MSM8625_RUMI3      MSM8625_RUMI3           3871
-valente                        MACH_VALENTE            VALENTE                 3872
-crfs_rfeye             MACH_CRFS_RFEYE         CRFS_RFEYE              3873
-rfeye                  MACH_RFEYE              RFEYE                   3874
-phidget_sbc3           MACH_PHIDGET_SBC3       PHIDGET_SBC3            3875
-tcw_mika               MACH_TCW_MIKA           TCW_MIKA                3876
-imx28_egf              MACH_IMX28_EGF          IMX28_EGF               3877
-valente_wx             MACH_VALENTE_WX         VALENTE_WX              3878
-huangshans             MACH_HUANGSHANS         HUANGSHANS              3879
-bosphorus1             MACH_BOSPHORUS1         BOSPHORUS1              3880
-prima                  MACH_PRIMA              PRIMA                   3881
-evita_ulk              MACH_EVITA_ULK          EVITA_ULK               3884
-merisc600              MACH_MERISC600          MERISC600               3885
-dolak                  MACH_DOLAK              DOLAK                   3886
-sbc53                  MACH_SBC53              SBC53                   3887
-elite_ulk              MACH_ELITE_ULK          ELITE_ULK               3888
-pov2                   MACH_POV2               POV2                    3889
-ipod_touch_2g          MACH_IPOD_TOUCH_2G      IPOD_TOUCH_2G           3890
-da850_pqab             MACH_DA850_PQAB         DA850_PQAB              3891
-fermi                  MACH_FERMI              FERMI                   3892
-ccardwmx28             MACH_CCARDWMX28         CCARDWMX28              3893
-ccardmx28              MACH_CCARDMX28          CCARDMX28               3894
-fs20_fcm2050           MACH_FS20_FCM2050       FS20_FCM2050            3895
-kinetis                        MACH_KINETIS            KINETIS                 3896
-kai                    MACH_KAI                KAI                     3897
-bcthb2                 MACH_BCTHB2             BCTHB2                  3898
-inels3_cu              MACH_INELS3_CU          INELS3_CU               3899
-da850_apollo           MACH_DA850_APOLLO       DA850_APOLLO            3901
-tracnas                        MACH_TRACNAS            TRACNAS                 3902
-mityarm335x            MACH_MITYARM335X        MITYARM335X             3903
-xcgz7x                 MACH_XCGZ7X             XCGZ7X                  3904
-cubox                  MACH_CUBOX              CUBOX                   3905
-terminator             MACH_TERMINATOR         TERMINATOR              3906
-eye03                  MACH_EYE03              EYE03                   3907
-kota3                  MACH_KOTA3              KOTA3                   3908
-pscpe                  MACH_PSCPE              PSCPE                   3910
-akt1100                        MACH_AKT1100            AKT1100                 3911
-pcaaxl2                        MACH_PCAAXL2            PCAAXL2                 3912
-primodd_ct             MACH_PRIMODD_CT         PRIMODD_CT              3913
-nsbc                   MACH_NSBC               NSBC                    3914
-meson2_skt             MACH_MESON2_SKT         MESON2_SKT              3915
-meson2_ref             MACH_MESON2_REF         MESON2_REF              3916
-ccardwmx28js           MACH_CCARDWMX28JS       CCARDWMX28JS            3917
-ccardmx28js            MACH_CCARDMX28JS        CCARDMX28JS             3918
-indico                 MACH_INDICO             INDICO                  3919
-msm8960dt              MACH_MSM8960DT          MSM8960DT               3920
-primods                        MACH_PRIMODS            PRIMODS                 3921
-beluga_m1388           MACH_BELUGA_M1388       BELUGA_M1388            3922
-primotd                        MACH_PRIMOTD            PRIMOTD                 3923
-varan_master           MACH_VARAN_MASTER       VARAN_MASTER            3924
-primodd                        MACH_PRIMODD            PRIMODD                 3925
-jetduo                 MACH_JETDUO             JETDUO                  3926
 mx53_umobo             MACH_MX53_UMOBO         MX53_UMOBO              3927
-trats                  MACH_TRATS              TRATS                   3928
-starcraft              MACH_STARCRAFT          STARCRAFT               3929
-qseven_tegra2          MACH_QSEVEN_TEGRA2      QSEVEN_TEGRA2           3930
-lichee_sun4i_devbd     MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD      3931
-movenow                        MACH_MOVENOW            MOVENOW                 3932
-golf_u                 MACH_GOLF_U             GOLF_U                  3933
-msm7627a_evb           MACH_MSM7627A_EVB       MSM7627A_EVB            3934
-rambo                  MACH_RAMBO              RAMBO                   3935
-golfu                  MACH_GOLFU              GOLFU                   3936
-mango310               MACH_MANGO310           MANGO310                3937
-dns343                 MACH_DNS343             DNS343                  3938
-var_som_om44           MACH_VAR_SOM_OM44       VAR_SOM_OM44            3939
-naon                   MACH_NAON               NAON                    3940
-vp4000                 MACH_VP4000             VP4000                  3941
-impcard                        MACH_IMPCARD            IMPCARD                 3942
-smoovcam               MACH_SMOOVCAM           SMOOVCAM                3943
-cobham3725             MACH_COBHAM3725         COBHAM3725              3944
-cobham3730             MACH_COBHAM3730         COBHAM3730              3945
-cobham3703             MACH_COBHAM3703         COBHAM3703              3946
-quetzal                        MACH_QUETZAL            QUETZAL                 3947
-apq8064_cdp            MACH_APQ8064_CDP        APQ8064_CDP             3948
-apq8064_mtp            MACH_APQ8064_MTP        APQ8064_MTP             3949
-apq8064_fluid          MACH_APQ8064_FLUID      APQ8064_FLUID           3950
-apq8064_liquid         MACH_APQ8064_LIQUID     APQ8064_LIQUID          3951
-mango210               MACH_MANGO210           MANGO210                3952
-mango100               MACH_MANGO100           MANGO100                3953
-mango24                        MACH_MANGO24            MANGO24                 3954
-mango64                        MACH_MANGO64            MANGO64                 3955
-nsa320                 MACH_NSA320             NSA320                  3956
-elv_ccu2               MACH_ELV_CCU2           ELV_CCU2                3957
-triton_x00             MACH_TRITON_X00         TRITON_X00              3958
-triton_1500_2000       MACH_TRITON_1500_2000   TRITON_1500_2000        3959
-pogoplugv4             MACH_POGOPLUGV4         POGOPLUGV4              3960
-venus_cl               MACH_VENUS_CL           VENUS_CL                3961
-vulcano_g20            MACH_VULCANO_G20        VULCANO_G20             3962
-sgs_i9100              MACH_SGS_I9100          SGS_I9100               3963
-stsv2                  MACH_STSV2              STSV2                   3964
-csb1724                        MACH_CSB1724            CSB1724                 3965
-omapl138_lcdk          MACH_OMAPL138_LCDK      OMAPL138_LCDK           3966
-pvd_mx25               MACH_PVD_MX25           PVD_MX25                3968
-meson6_skt             MACH_MESON6_SKT         MESON6_SKT              3969
-meson6_ref             MACH_MESON6_REF         MESON6_REF              3970
-pxm                    MACH_PXM                PXM                     3971
-pogoplugv3             MACH_POGOPLUGV3         POGOPLUGV3              3973
-mlp89626               MACH_MLP89626           MLP89626                3974
-iomegahmndce           MACH_IOMEGAHMNDCE       IOMEGAHMNDCE            3975
-pogoplugv3pci          MACH_POGOPLUGV3PCI      POGOPLUGV3PCI           3976
-bntv250                        MACH_BNTV250            BNTV250                 3977
-mx53_qseven            MACH_MX53_QSEVEN        MX53_QSEVEN             3978
-gtl_it1100             MACH_GTL_IT1100         GTL_IT1100              3979
-mx6q_sabresd           MACH_MX6Q_SABRESD       MX6Q_SABRESD            3980
 mt4                    MACH_MT4                MT4                     3981
-jumbo_d                        MACH_JUMBO_D            JUMBO_D                 3982
-jumbo_i                        MACH_JUMBO_I            JUMBO_I                 3983
-fs20_dmp               MACH_FS20_DMP           FS20_DMP                3984
-dns320                 MACH_DNS320             DNS320                  3985
-mx28bacos              MACH_MX28BACOS          MX28BACOS               3986
-tl80                   MACH_TL80               TL80                    3987
-polatis_nic_1001       MACH_POLATIS_NIC_1001   POLATIS_NIC_1001        3988
-tely                   MACH_TELY               TELY                    3989
 u8520                  MACH_U8520              U8520                   3990
-manta                  MACH_MANTA              MANTA                   3991
-mpq8064_cdp            MACH_MPQ8064_CDP        MPQ8064_CDP             3993
-mpq8064_dtv            MACH_MPQ8064_DTV        MPQ8064_DTV             3995
-dm368som               MACH_DM368SOM           DM368SOM                3996
-gprisb2                        MACH_GPRISB2            GPRISB2                 3997
-chammid                        MACH_CHAMMID            CHAMMID                 3998
-seoul2                 MACH_SEOUL2             SEOUL2                  3999
-omap4_nooktablet       MACH_OMAP4_NOOKTABLET   OMAP4_NOOKTABLET        4000
-aalto                  MACH_AALTO              AALTO                   4001
-metro                  MACH_METRO              METRO                   4002
-cydm3730               MACH_CYDM3730           CYDM3730                4003
-tqma53                 MACH_TQMA53             TQMA53                  4004
-msm7627a_qrd3          MACH_MSM7627A_QRD3      MSM7627A_QRD3           4005
-mx28_canby             MACH_MX28_CANBY         MX28_CANBY              4006
-tiger                  MACH_TIGER              TIGER                   4007
-pcats_9307_type_a      MACH_PCATS_9307_TYPE_A  PCATS_9307_TYPE_A       4008
-pcats_9307_type_o      MACH_PCATS_9307_TYPE_O  PCATS_9307_TYPE_O       4009
-pcats_9307_type_r      MACH_PCATS_9307_TYPE_R  PCATS_9307_TYPE_R       4010
-streamplug             MACH_STREAMPLUG         STREAMPLUG              4011
-icechicken_dev         MACH_ICECHICKEN_DEV     ICECHICKEN_DEV          4012
-hedgehog               MACH_HEDGEHOG           HEDGEHOG                4013
-yusend_obc             MACH_YUSEND_OBC         YUSEND_OBC              4014
-imxninja               MACH_IMXNINJA           IMXNINJA                4015
-omap4_jarod            MACH_OMAP4_JAROD        OMAP4_JAROD             4016
-eco5_pk                        MACH_ECO5_PK            ECO5_PK                 4017
-qj2440                 MACH_QJ2440             QJ2440                  4018
-mx6q_mercury           MACH_MX6Q_MERCURY       MX6Q_MERCURY            4019
-cm6810                 MACH_CM6810             CM6810                  4020
-omap4_torpedo          MACH_OMAP4_TORPEDO      OMAP4_TORPEDO           4021
-nsa310                 MACH_NSA310             NSA310                  4022
-tmx536                 MACH_TMX536             TMX536                  4023
-ktt20                  MACH_KTT20              KTT20                   4024
-dragonix               MACH_DRAGONIX           DRAGONIX                4025
-lungching              MACH_LUNGCHING          LUNGCHING               4026
-bulogics               MACH_BULOGICS           BULOGICS                4027
-mx535_sx               MACH_MX535_SX           MX535_SX                4028
-ngui3250               MACH_NGUI3250           NGUI3250                4029
-salutec_dac            MACH_SALUTEC_DAC        SALUTEC_DAC             4030
-loco                   MACH_LOCO               LOCO                    4031
-ctera_plug_usi         MACH_CTERA_PLUG_USI     CTERA_PLUG_USI          4032
-scepter                        MACH_SCEPTER            SCEPTER                 4033
-sga                    MACH_SGA                SGA                     4034
-p_81_j5                        MACH_P_81_J5            P_81_J5                 4035
-p_81_o4                        MACH_P_81_O4            P_81_O4                 4036
-msm8625_surf           MACH_MSM8625_SURF       MSM8625_SURF            4037
-carallon_shark         MACH_CARALLON_SHARK     CARALLON_SHARK          4038
-ordog                  MACH_ORDOG              ORDOG                   4040
-puente_io              MACH_PUENTE_IO          PUENTE_IO               4041
-msm8625_evb            MACH_MSM8625_EVB        MSM8625_EVB             4042
-ev_am1707              MACH_EV_AM1707          EV_AM1707               4043
-ev_am1707e2            MACH_EV_AM1707E2        EV_AM1707E2             4044
-ev_am3517e2            MACH_EV_AM3517E2        EV_AM3517E2             4045
-calabria               MACH_CALABRIA           CALABRIA                4046
-ev_imx287              MACH_EV_IMX287          EV_IMX287               4047
-erau                   MACH_ERAU               ERAU                    4048
-sichuan                        MACH_SICHUAN            SICHUAN                 4049
-davinci_da850          MACH_DAVINCI_DA850      DAVINCI_DA850           4051
-omap138_trunarc                MACH_OMAP138_TRUNARC    OMAP138_TRUNARC         4052
-bcm4761                        MACH_BCM4761            BCM4761                 4053
-picasso_e2             MACH_PICASSO_E2         PICASSO_E2              4054
-picasso_mf             MACH_PICASSO_MF         PICASSO_MF              4055
-miro                   MACH_MIRO               MIRO                    4056
-at91sam9g20ewon3       MACH_AT91SAM9G20EWON3   AT91SAM9G20EWON3        4057
-yoyo                   MACH_YOYO               YOYO                    4058
-windjkl                        MACH_WINDJKL            WINDJKL                 4059
-monarudo               MACH_MONARUDO           MONARUDO                4060
-batan                  MACH_BATAN              BATAN                   4061
-tadao                  MACH_TADAO              TADAO                   4062
-baso                   MACH_BASO               BASO                    4063
-mahon                  MACH_MAHON              MAHON                   4064
-villec2                        MACH_VILLEC2            VILLEC2                 4065
-asi1230                        MACH_ASI1230            ASI1230                 4066
-alaska                 MACH_ALASKA             ALASKA                  4067
-swarco_shdsl2          MACH_SWARCO_SHDSL2      SWARCO_SHDSL2           4068
-oxrtu                  MACH_OXRTU              OXRTU                   4069
-omap5_panda            MACH_OMAP5_PANDA        OMAP5_PANDA             4070
-c8000                  MACH_C8000              C8000                   4072
-bje_display3_5         MACH_BJE_DISPLAY3_5     BJE_DISPLAY3_5          4073
-picomod7               MACH_PICOMOD7           PICOMOD7                4074
-picocom5               MACH_PICOCOM5           PICOCOM5                4075
-qblissa8               MACH_QBLISSA8           QBLISSA8                4076
-armstonea8             MACH_ARMSTONEA8         ARMSTONEA8              4077
-netdcu14               MACH_NETDCU14           NETDCU14                4078
-at91sam9x5_epiphan     MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN      4079
-p2u                    MACH_P2U                P2U                     4080
-doris                  MACH_DORIS              DORIS                   4081
-j49                    MACH_J49                J49                     4082
-vdss2e                 MACH_VDSS2E             VDSS2E                  4083
-vc300                  MACH_VC300              VC300                   4084
-ns115_pad_test         MACH_NS115_PAD_TEST     NS115_PAD_TEST          4085
-ns115_pad_ref          MACH_NS115_PAD_REF      NS115_PAD_REF           4086
-ns115_phone_test       MACH_NS115_PHONE_TEST   NS115_PHONE_TEST        4087
-ns115_phone_ref                MACH_NS115_PHONE_REF    NS115_PHONE_REF         4088
-golfc                  MACH_GOLFC              GOLFC                   4089
-xerox_olympus          MACH_XEROX_OLYMPUS      XEROX_OLYMPUS           4090
-mx6sl_arm2             MACH_MX6SL_ARM2         MX6SL_ARM2              4091
-csb1701_csb1726                MACH_CSB1701_CSB1726    CSB1701_CSB1726         4092
-at91sam9xeek           MACH_AT91SAM9XEEK       AT91SAM9XEEK            4093
-ebv210                 MACH_EBV210             EBV210                  4094
-msm7627a_qrd7          MACH_MSM7627A_QRD7      MSM7627A_QRD7           4095
-svthin                 MACH_SVTHIN             SVTHIN                  4096
-duovero                        MACH_DUOVERO            DUOVERO                 4097
 chupacabra             MACH_CHUPACABRA         CHUPACABRA              4098
 scorpion               MACH_SCORPION           SCORPION                4099
 davinci_he_hmi10       MACH_DAVINCI_HE_HMI10   DAVINCI_HE_HMI10        4100
@@ -1157,7 +580,6 @@ tam335x                    MACH_TAM335X            TAM335X                 4116
 grouper                        MACH_GROUPER            GROUPER                 4117
 mpcsa21_9g20           MACH_MPCSA21_9G20       MPCSA21_9G20            4118
 m6u_cpu                        MACH_M6U_CPU            M6U_CPU                 4119
-davinci_dp10           MACH_DAVINCI_DP10       DAVINCI_DP10            4120
 ginkgo                 MACH_GINKGO             GINKGO                  4121
 cgt_qmx6               MACH_CGT_QMX6           CGT_QMX6                4122
 profpga                        MACH_PROFPGA            PROFPGA                 4123
@@ -1204,3 +626,384 @@ baileys                  MACH_BAILEYS            BAILEYS                 4169
 familybox              MACH_FAMILYBOX          FAMILYBOX               4170
 ensemble_mx35          MACH_ENSEMBLE_MX35      ENSEMBLE_MX35           4171
 sc_sps_1               MACH_SC_SPS_1           SC_SPS_1                4172
+ucsimply_sam9260       MACH_UCSIMPLY_SAM9260   UCSIMPLY_SAM9260        4173
+unicorn                        MACH_UNICORN            UNICORN                 4174
+m9g45a                 MACH_M9G45A             M9G45A                  4175
+mtwebif                        MACH_MTWEBIF            MTWEBIF                 4176
+playstone              MACH_PLAYSTONE          PLAYSTONE               4177
+chelsea                        MACH_CHELSEA            CHELSEA                 4178
+bayern                 MACH_BAYERN             BAYERN                  4179
+mitwo                  MACH_MITWO              MITWO                   4180
+mx25_noah              MACH_MX25_NOAH          MX25_NOAH               4181
+stm_b2020              MACH_STM_B2020          STM_B2020               4182
+annax_src              MACH_ANNAX_SRC          ANNAX_SRC               4183
+ionics_stratus         MACH_IONICS_STRATUS     IONICS_STRATUS          4184
+hugo                   MACH_HUGO               HUGO                    4185
+em300                  MACH_EM300              EM300                   4186
+mmp3_qseven            MACH_MMP3_QSEVEN        MMP3_QSEVEN             4187
+bosphorus2             MACH_BOSPHORUS2         BOSPHORUS2              4188
+tt2200                 MACH_TT2200             TT2200                  4189
+ocelot3                        MACH_OCELOT3            OCELOT3                 4190
+tek_cobra              MACH_TEK_COBRA          TEK_COBRA               4191
+protou                 MACH_PROTOU             PROTOU                  4192
+msm8625_evt            MACH_MSM8625_EVT        MSM8625_EVT             4193
+mx53_sellwood          MACH_MX53_SELLWOOD      MX53_SELLWOOD           4194
+somiq_am35             MACH_SOMIQ_AM35         SOMIQ_AM35              4195
+somiq_am37             MACH_SOMIQ_AM37         SOMIQ_AM37              4196
+k2_plc_cl              MACH_K2_PLC_CL          K2_PLC_CL               4197
+tc2                    MACH_TC2                TC2                     4198
+dulex_j                        MACH_DULEX_J            DULEX_J                 4199
+stm_b2044              MACH_STM_B2044          STM_B2044               4200
+deluxe_j               MACH_DELUXE_J           DELUXE_J                4201
+mango2443              MACH_MANGO2443          MANGO2443               4202
+cp2dcg                 MACH_CP2DCG             CP2DCG                  4203
+cp2dtg                 MACH_CP2DTG             CP2DTG                  4204
+cp2dug                 MACH_CP2DUG             CP2DUG                  4205
+var_som_am33           MACH_VAR_SOM_AM33       VAR_SOM_AM33            4206
+pepper                 MACH_PEPPER             PEPPER                  4207
+mango2450              MACH_MANGO2450          MANGO2450               4208
+valente_wx_c9          MACH_VALENTE_WX_C9      VALENTE_WX_C9           4209
+minitv                 MACH_MINITV             MINITV                  4210
+u8540                  MACH_U8540              U8540                   4211
+iv_atlas_i_z7e         MACH_IV_ATLAS_I_Z7E     IV_ATLAS_I_Z7E          4212
+mach_type_sky          MACH_MACH_TYPE_SKY      MACH_TYPE_SKY           4214
+bluesky                        MACH_BLUESKY            BLUESKY                 4215
+ngrouter               MACH_NGROUTER           NGROUTER                4216
+mx53_denetim           MACH_MX53_DENETIM       MX53_DENETIM            4217
+opal                   MACH_OPAL               OPAL                    4218
+gnet_us3gref           MACH_GNET_US3GREF       GNET_US3GREF            4219
+gnet_nc3g              MACH_GNET_NC3G          GNET_NC3G               4220
+gnet_ge3g              MACH_GNET_GE3G          GNET_GE3G               4221
+adp2                   MACH_ADP2               ADP2                    4222
+tqma28                 MACH_TQMA28             TQMA28                  4223
+kacom3                 MACH_KACOM3             KACOM3                  4224
+rrhdemo                        MACH_RRHDEMO            RRHDEMO                 4225
+protodug               MACH_PROTODUG           PROTODUG                4226
+lago                   MACH_LAGO               LAGO                    4227
+ktt30                  MACH_KTT30              KTT30                   4228
+ts43xx                 MACH_TS43XX             TS43XX                  4229
+mx6q_denso             MACH_MX6Q_DENSO         MX6Q_DENSO              4230
+comsat_gsmumts8                MACH_COMSAT_GSMUMTS8    COMSAT_GSMUMTS8         4231
+dreamx                 MACH_DREAMX             DREAMX                  4232
+thunderstonem          MACH_THUNDERSTONEM      THUNDERSTONEM           4233
+yoyopad                        MACH_YOYOPAD            YOYOPAD                 4234
+yoyopatient            MACH_YOYOPATIENT        YOYOPATIENT             4235
+a10l                   MACH_A10L               A10L                    4236
+mq60                   MACH_MQ60               MQ60                    4237
+linkstation_lsql       MACH_LINKSTATION_LSQL   LINKSTATION_LSQL        4238
+am3703gateway          MACH_AM3703GATEWAY      AM3703GATEWAY           4239
+accipiter              MACH_ACCIPITER          ACCIPITER               4240
+magnidug               MACH_MAGNIDUG           MAGNIDUG                4242
+hydra                  MACH_HYDRA              HYDRA                   4243
+sun3i                  MACH_SUN3I              SUN3I                   4244
+stm_b2078              MACH_STM_B2078          STM_B2078               4245
+at91sam9263deskv2      MACH_AT91SAM9263DESKV2  AT91SAM9263DESKV2       4246
+deluxe_r               MACH_DELUXE_R           DELUXE_R                4247
+p_98_v                 MACH_P_98_V             P_98_V                  4248
+p_98_c                 MACH_P_98_C             P_98_C                  4249
+davinci_am18xx_omn     MACH_DAVINCI_AM18XX_OMN DAVINCI_AM18XX_OMN      4250
+socfpga_cyclone5       MACH_SOCFPGA_CYCLONE5   SOCFPGA_CYCLONE5        4251
+cabatuin               MACH_CABATUIN           CABATUIN                4252
+yoyopad_ft             MACH_YOYOPAD_FT         YOYOPAD_FT              4253
+dan2400evb             MACH_DAN2400EVB         DAN2400EVB              4254
+dan3400evb             MACH_DAN3400EVB         DAN3400EVB              4255
+edm_sf_imx6            MACH_EDM_SF_IMX6        EDM_SF_IMX6             4256
+edm_cf_imx6            MACH_EDM_CF_IMX6        EDM_CF_IMX6             4257
+vpos3xx                        MACH_VPOS3XX            VPOS3XX                 4258
+vulcano_9x5            MACH_VULCANO_9X5        VULCANO_9X5             4259
+spmp8000               MACH_SPMP8000           SPMP8000                4260
+catalina               MACH_CATALINA           CATALINA                4261
+rd88f5181l_fe          MACH_RD88F5181L_FE      RD88F5181L_FE           4262
+mx535_mx               MACH_MX535_MX           MX535_MX                4263
+armadillo840           MACH_ARMADILLO840       ARMADILLO840            4264
+spc9000baseboard       MACH_SPC9000BASEBOARD   SPC9000BASEBOARD        4265
+iris                   MACH_IRIS               IRIS                    4266
+protodcg               MACH_PROTODCG           PROTODCG                4267
+palmtree               MACH_PALMTREE           PALMTREE                4268
+novena                 MACH_NOVENA             NOVENA                  4269
+ma_um                  MACH_MA_UM              MA_UM                   4270
+ma_am                  MACH_MA_AM              MA_AM                   4271
+ems348                 MACH_EMS348             EMS348                  4272
+cm_fx6                 MACH_CM_FX6             CM_FX6                  4273
+arndale                        MACH_ARNDALE            ARNDALE                 4274
+q5xr5                  MACH_Q5XR5              Q5XR5                   4275
+willow                 MACH_WILLOW             WILLOW                  4276
+omap3621_odyv3         MACH_OMAP3621_ODYV3     OMAP3621_ODYV3          4277
+omapl138_presonus      MACH_OMAPL138_PRESONUS  OMAPL138_PRESONUS       4278
+dvf99                  MACH_DVF99              DVF99                   4279
+impression_j           MACH_IMPRESSION_J       IMPRESSION_J            4280
+qblissa9               MACH_QBLISSA9           QBLISSA9                4281
+robin_heliview10       MACH_ROBIN_HELIVIEW10   ROBIN_HELIVIEW10        4282
+sun7i                  MACH_SUN7I              SUN7I                   4283
+mx6q_hdmidongle                MACH_MX6Q_HDMIDONGLE    MX6Q_HDMIDONGLE         4284
+mx6_sid2               MACH_MX6_SID2           MX6_SID2                4285
+helios_v3              MACH_HELIOS_V3          HELIOS_V3               4286
+helios_v4              MACH_HELIOS_V4          HELIOS_V4               4287
+q7_imx6                        MACH_Q7_IMX6            Q7_IMX6                 4288
+odroidx                        MACH_ODROIDX            ODROIDX                 4289
+robpro                 MACH_ROBPRO             ROBPRO                  4290
+research59if_mk1       MACH_RESEARCH59IF_MK1   RESEARCH59IF_MK1        4291
+bobsleigh              MACH_BOBSLEIGH          BOBSLEIGH               4292
+dcshgwt3               MACH_DCSHGWT3           DCSHGWT3                4293
+gld1018                        MACH_GLD1018            GLD1018                 4294
+ev10                   MACH_EV10               EV10                    4295
+nitrogen6x             MACH_NITROGEN6X         NITROGEN6X              4296
+p_107_bb               MACH_P_107_BB           P_107_BB                4297
+evita_utl              MACH_EVITA_UTL          EVITA_UTL               4298
+falconwing             MACH_FALCONWING         FALCONWING              4299
+dct3                   MACH_DCT3               DCT3                    4300
+cpx2e_cell             MACH_CPX2E_CELL         CPX2E_CELL              4301
+amiro                  MACH_AMIRO              AMIRO                   4302
+mx6q_brassboard                MACH_MX6Q_BRASSBOARD    MX6Q_BRASSBOARD         4303
+dalmore                        MACH_DALMORE            DALMORE                 4304
+omap3_portal7cp                MACH_OMAP3_PORTAL7CP    OMAP3_PORTAL7CP         4305
+tegra_pluto            MACH_TEGRA_PLUTO        TEGRA_PLUTO             4306
+mx6sl_evk              MACH_MX6SL_EVK          MX6SL_EVK               4307
+m7                     MACH_M7                 M7                      4308
+pxm2                   MACH_PXM2               PXM2                    4309
+haba_knx_lite          MACH_HABA_KNX_LITE      HABA_KNX_LITE           4310
+tai                    MACH_TAI                TAI                     4311
+prototd                        MACH_PROTOTD            PROTOTD                 4312
+dst_tonto              MACH_DST_TONTO          DST_TONTO               4313
+draco                  MACH_DRACO              DRACO                   4314
+dxr2                   MACH_DXR2               DXR2                    4315
+rut                    MACH_RUT                RUT                     4316
+am180x_wsc             MACH_AM180X_WSC         AM180X_WSC              4317
+deluxe_u               MACH_DELUXE_U           DELUXE_U                4318
+deluxe_ul              MACH_DELUXE_UL          DELUXE_UL               4319
+at91sam9260medths      MACH_AT91SAM9260MEDTHS  AT91SAM9260MEDTHS       4320
+matrix516              MACH_MATRIX516          MATRIX516               4321
+vid401x                        MACH_VID401X            VID401X                 4322
+helios_v5              MACH_HELIOS_V5          HELIOS_V5               4323
+playpaq2               MACH_PLAYPAQ2           PLAYPAQ2                4324
+igam                   MACH_IGAM               IGAM                    4325
+amico_i                        MACH_AMICO_I            AMICO_I                 4326
+amico_e                        MACH_AMICO_E            AMICO_E                 4327
+sentient_mm3_ck                MACH_SENTIENT_MM3_CK    SENTIENT_MM3_CK         4328
+smx6                   MACH_SMX6               SMX6                    4329
+pango                  MACH_PANGO              PANGO                   4330
+ns115_stick            MACH_NS115_STICK        NS115_STICK             4331
+bctrm3                 MACH_BCTRM3             BCTRM3                  4332
+doctorws               MACH_DOCTORWS           DOCTORWS                4333
+m2601                  MACH_M2601              M2601                   4334
+vgg1111                        MACH_VGG1111            VGG1111                 4337
+countach               MACH_COUNTACH           COUNTACH                4338
+visstrim_sm20          MACH_VISSTRIM_SM20      VISSTRIM_SM20           4339
+a639                   MACH_A639               A639                    4340
+spacemonkey            MACH_SPACEMONKEY        SPACEMONKEY             4341
+zpdu_stamp             MACH_ZPDU_STAMP         ZPDU_STAMP              4342
+htc_g7_clone           MACH_HTC_G7_CLONE       HTC_G7_CLONE            4343
+ft2080_corvus          MACH_FT2080_CORVUS      FT2080_CORVUS           4344
+fisland                        MACH_FISLAND            FISLAND                 4345
+zpdu                   MACH_ZPDU               ZPDU                    4346
+urt                    MACH_URT                URT                     4347
+conti_ovip             MACH_CONTI_OVIP         CONTI_OVIP              4348
+omapl138_nagra         MACH_OMAPL138_NAGRA     OMAPL138_NAGRA          4349
+da850_at3kp1           MACH_DA850_AT3KP1       DA850_AT3KP1            4350
+da850_at3kp2           MACH_DA850_AT3KP2       DA850_AT3KP2            4351
+surma                  MACH_SURMA              SURMA                   4352
+stm_b2092              MACH_STM_B2092          STM_B2092               4353
+mx535_ycr              MACH_MX535_YCR          MX535_YCR               4354
+m7_wl                  MACH_M7_WL              M7_WL                   4355
+m7_u                   MACH_M7_U               M7_U                    4356
+omap3_stndt_evm                MACH_OMAP3_STNDT_EVM    OMAP3_STNDT_EVM         4357
+m7_wlv                 MACH_M7_WLV             M7_WLV                  4358
+xam3517                        MACH_XAM3517            XAM3517                 4359
+a220                   MACH_A220               A220                    4360
+aclima_odie            MACH_ACLIMA_ODIE        ACLIMA_ODIE             4361
+vibble                 MACH_VIBBLE             VIBBLE                  4362
+k2_u                   MACH_K2_U               K2_U                    4363
+mx53_egf               MACH_MX53_EGF           MX53_EGF                4364
+novpek_imx53           MACH_NOVPEK_IMX53       NOVPEK_IMX53            4365
+novpek_imx6x           MACH_NOVPEK_IMX6X       NOVPEK_IMX6X            4366
+mx25_smartbox          MACH_MX25_SMARTBOX      MX25_SMARTBOX           4367
+eicg6410               MACH_EICG6410           EICG6410                4368
+picasso_e3             MACH_PICASSO_E3         PICASSO_E3              4369
+motonavigator          MACH_MOTONAVIGATOR      MOTONAVIGATOR           4370
+varioconnect2          MACH_VARIOCONNECT2      VARIOCONNECT2           4371
+deluxe_tw              MACH_DELUXE_TW          DELUXE_TW               4372
+kore3                  MACH_KORE3              KORE3                   4374
+mx6s_drs               MACH_MX6S_DRS           MX6S_DRS                4375
+cmimx6                 MACH_CMIMX6             CMIMX6                  4376
+roth                   MACH_ROTH               ROTH                    4377
+eq4ux                  MACH_EQ4UX              EQ4UX                   4378
+x1plus                 MACH_X1PLUS             X1PLUS                  4379
+modimx27               MACH_MODIMX27           MODIMX27                4380
+videon_hduac           MACH_VIDEON_HDUAC       VIDEON_HDUAC            4381
+blackbird              MACH_BLACKBIRD          BLACKBIRD               4382
+runmaster              MACH_RUNMASTER          RUNMASTER               4383
+ceres                  MACH_CERES              CERES                   4384
+nad435                 MACH_NAD435             NAD435                  4385
+ns115_proto_type       MACH_NS115_PROTO_TYPE   NS115_PROTO_TYPE        4386
+fs20_vcc               MACH_FS20_VCC           FS20_VCC                4387
+meson6tv_skt           MACH_MESON6TV_SKT       MESON6TV_SKT            4389
+keystone               MACH_KEYSTONE           KEYSTONE                4390
+pcm052                 MACH_PCM052             PCM052                  4391
+qrd_skud_prime         MACH_QRD_SKUD_PRIME     QRD_SKUD_PRIME          4393
+guf_santaro            MACH_GUF_SANTARO        GUF_SANTARO             4395
+sheepshead             MACH_SHEEPSHEAD         SHEEPSHEAD              4396
+mx6_iwg15m_mxm         MACH_MX6_IWG15M_MXM     MX6_IWG15M_MXM          4397
+mx6_iwg15m_q7          MACH_MX6_IWG15M_Q7      MX6_IWG15M_Q7           4398
+at91sam9263if8mic      MACH_AT91SAM9263IF8MIC  AT91SAM9263IF8MIC       4399
+marcopolo              MACH_MARCOPOLO          MARCOPOLO               4401
+mx535_sdcr             MACH_MX535_SDCR         MX535_SDCR              4402
+mx53_csb2733           MACH_MX53_CSB2733       MX53_CSB2733            4403
+diva                   MACH_DIVA               DIVA                    4404
+ncr_7744               MACH_NCR_7744           NCR_7744                4405
+macallan               MACH_MACALLAN           MACALLAN                4406
+wnr3500                        MACH_WNR3500            WNR3500                 4407
+pgavrf                 MACH_PGAVRF             PGAVRF                  4408
+helios_v6              MACH_HELIOS_V6          HELIOS_V6               4409
+lcct                   MACH_LCCT               LCCT                    4410
+csndug                 MACH_CSNDUG             CSNDUG                  4411
+wandboard_imx6         MACH_WANDBOARD_IMX6     WANDBOARD_IMX6          4412
+omap4_jet              MACH_OMAP4_JET          OMAP4_JET               4413
+tegra_roth             MACH_TEGRA_ROTH         TEGRA_ROTH              4414
+m7dcg                  MACH_M7DCG              M7DCG                   4415
+m7dug                  MACH_M7DUG              M7DUG                   4416
+m7dtg                  MACH_M7DTG              M7DTG                   4417
+ap42x                  MACH_AP42X              AP42X                   4418
+var_som_mx6            MACH_VAR_SOM_MX6        VAR_SOM_MX6             4419
+pdlu                   MACH_PDLU               PDLU                    4420
+hydrogen               MACH_HYDROGEN           HYDROGEN                4421
+npa211e                        MACH_NPA211E            NPA211E                 4422
+arcadia                        MACH_ARCADIA            ARCADIA                 4423
+arcadia_l              MACH_ARCADIA_L          ARCADIA_L               4424
+msm8930dt              MACH_MSM8930DT          MSM8930DT               4425
+ktam3874               MACH_KTAM3874           KTAM3874                4426
+cec4                   MACH_CEC4               CEC4                    4427
+ape6evm                        MACH_APE6EVM            APE6EVM                 4428
+tx6                    MACH_TX6                TX6                     4429
+cfa10037               MACH_CFA10037           CFA10037                4431
+ezp1000                        MACH_EZP1000            EZP1000                 4433
+wgr826v                        MACH_WGR826V            WGR826V                 4434
+exuma                  MACH_EXUMA              EXUMA                   4435
+fregate                        MACH_FREGATE            FREGATE                 4436
+osirisimx508           MACH_OSIRISIMX508       OSIRISIMX508            4437
+st_exigo               MACH_ST_EXIGO           ST_EXIGO                4438
+pismo                  MACH_PISMO              PISMO                   4439
+atc7                   MACH_ATC7               ATC7                    4440
+nspireclp              MACH_NSPIRECLP          NSPIRECLP               4441
+nspiretp               MACH_NSPIRETP           NSPIRETP                4442
+nspirecx               MACH_NSPIRECX           NSPIRECX                4443
+maya                   MACH_MAYA               MAYA                    4444
+wecct                  MACH_WECCT              WECCT                   4445
+m2s                    MACH_M2S                M2S                     4446
+msm8625q_evbd          MACH_MSM8625Q_EVBD      MSM8625Q_EVBD           4447
+tiny210                        MACH_TINY210            TINY210                 4448
+g3                     MACH_G3                 G3                      4449
+hurricane              MACH_HURRICANE          HURRICANE               4450
+mx6_pod                        MACH_MX6_POD            MX6_POD                 4451
+elondcn                        MACH_ELONDCN            ELONDCN                 4452
+cwmx535                        MACH_CWMX535            CWMX535                 4453
+m7_wlj                 MACH_M7_WLJ             M7_WLJ                  4454
+qsp_arm                        MACH_QSP_ARM            QSP_ARM                 4455
+msm8625q_skud          MACH_MSM8625Q_SKUD      MSM8625Q_SKUD           4456
+htcmondrian            MACH_HTCMONDRIAN        HTCMONDRIAN             4457
+watson_ead             MACH_WATSON_EAD         WATSON_EAD              4458
+mitwoa                 MACH_MITWOA             MITWOA                  4459
+omap3_wolverine                MACH_OMAP3_WOLVERINE    OMAP3_WOLVERINE         4460
+mapletree              MACH_MAPLETREE          MAPLETREE               4461
+msm8625_fih_sae                MACH_MSM8625_FIH_SAE    MSM8625_FIH_SAE         4462
+epc35                  MACH_EPC35              EPC35                   4463
+smartrtu               MACH_SMARTRTU           SMARTRTU                4464
+rcm101                 MACH_RCM101             RCM101                  4465
+amx_imx53_mxx          MACH_AMX_IMX53_MXX      AMX_IMX53_MXX           4466
+acer_a12               MACH_ACER_A12           ACER_A12                4470
+sbc6x                  MACH_SBC6X              SBC6X                   4471
+u2                     MACH_U2                 U2                      4472
+smdk4270               MACH_SMDK4270           SMDK4270                4473
+priscillag             MACH_PRISCILLAG         PRISCILLAG              4474
+priscillac             MACH_PRISCILLAC         PRISCILLAC              4475
+priscilla              MACH_PRISCILLA          PRISCILLA               4476
+innova_shpu_v2         MACH_INNOVA_SHPU_V2     INNOVA_SHPU_V2          4477
+mach_type_dep2410      MACH_MACH_TYPE_DEP2410  MACH_TYPE_DEP2410       4479
+bctre3                 MACH_BCTRE3             BCTRE3                  4480
+omap_m100              MACH_OMAP_M100          OMAP_M100               4481
+flo                    MACH_FLO                FLO                     4482
+nanobone               MACH_NANOBONE           NANOBONE                4483
+stm_b2105              MACH_STM_B2105          STM_B2105               4484
+omap4_bsc_bap_v3       MACH_OMAP4_BSC_BAP_V3   OMAP4_BSC_BAP_V3        4485
+ss1pam                 MACH_SS1PAM             SS1PAM                  4486
+primominiu             MACH_PRIMOMINIU         PRIMOMINIU              4488
+mrt_35hd_dualnas_e     MACH_MRT_35HD_DUALNAS_E MRT_35HD_DUALNAS_E      4489
+kiwi                   MACH_KIWI               KIWI                    4490
+hw90496                        MACH_HW90496            HW90496                 4491
+mep2440                        MACH_MEP2440            MEP2440                 4492
+colibri_t30            MACH_COLIBRI_T30        COLIBRI_T30             4493
+cwv1                   MACH_CWV1               CWV1                    4494
+nsa325                 MACH_NSA325             NSA325                  4495
+dpxmtc                 MACH_DPXMTC             DPXMTC                  4497
+tt_stuttgart           MACH_TT_STUTTGART       TT_STUTTGART            4498
+miranda_apcii          MACH_MIRANDA_APCII      MIRANDA_APCII           4499
+mx6q_moderox           MACH_MX6Q_MODEROX       MX6Q_MODEROX            4500
+mudskipper             MACH_MUDSKIPPER         MUDSKIPPER              4501
+urania                 MACH_URANIA             URANIA                  4502
+stm_b2112              MACH_STM_B2112          STM_B2112               4503
+mx6q_ats_phoenix       MACH_MX6Q_ATS_PHOENIX   MX6Q_ATS_PHOENIX        4505
+stm_b2116              MACH_STM_B2116          STM_B2116               4506
+mythology              MACH_MYTHOLOGY          MYTHOLOGY               4507
+fc360v1                        MACH_FC360V1            FC360V1                 4508
+gps_sensor             MACH_GPS_SENSOR         GPS_SENSOR              4509
+gazelle                        MACH_GAZELLE            GAZELLE                 4510
+mpq8064_dma            MACH_MPQ8064_DMA        MPQ8064_DMA             4511
+wems_asd01             MACH_WEMS_ASD01         WEMS_ASD01              4512
+apalis_t30             MACH_APALIS_T30         APALIS_T30              4513
+armstonea9             MACH_ARMSTONEA9         ARMSTONEA9              4515
+omap_blazetablet       MACH_OMAP_BLAZETABLET   OMAP_BLAZETABLET        4516
+ar6mxq                 MACH_AR6MXQ             AR6MXQ                  4517
+ar6mxs                 MACH_AR6MXS             AR6MXS                  4518
+gwventana              MACH_GWVENTANA          GWVENTANA               4520
+igep0033               MACH_IGEP0033           IGEP0033                4521
+h52c1_concerto         MACH_H52C1_CONCERTO     H52C1_CONCERTO          4524
+fcmbrd                 MACH_FCMBRD             FCMBRD                  4525
+pcaaxs1                        MACH_PCAAXS1            PCAAXS1                 4526
+ls_orca                        MACH_LS_ORCA            LS_ORCA                 4527
+pcm051lb               MACH_PCM051LB           PCM051LB                4528
+mx6s_lp507_gvci                MACH_MX6S_LP507_GVCI    MX6S_LP507_GVCI         4529
+dido                   MACH_DIDO               DIDO                    4530
+swarco_itc3_9g20       MACH_SWARCO_ITC3_9G20   SWARCO_ITC3_9G20        4531
+robo_roady             MACH_ROBO_ROADY         ROBO_ROADY              4532
+rskrza1                        MACH_RSKRZA1            RSKRZA1                 4533
+swarco_sid             MACH_SWARCO_SID         SWARCO_SID              4534
+mx6_iwg15s_sbc         MACH_MX6_IWG15S_SBC     MX6_IWG15S_SBC          4535
+mx6q_camaro            MACH_MX6Q_CAMARO        MX6Q_CAMARO             4536
+hb6mxs                 MACH_HB6MXS             HB6MXS                  4537
+lager                  MACH_LAGER              LAGER                   4538
+lp8x4x                 MACH_LP8X4X             LP8X4X                  4539
+tegratab7              MACH_TEGRATAB7          TEGRATAB7               4540
+andromeda              MACH_ANDROMEDA          ANDROMEDA               4541
+bootes                 MACH_BOOTES             BOOTES                  4542
+nethmi                 MACH_NETHMI             NETHMI                  4543
+tegratab               MACH_TEGRATAB           TEGRATAB                4544
+som5_evb               MACH_SOM5_EVB           SOM5_EVB                4545
+venaticorum            MACH_VENATICORUM        VENATICORUM             4546
+stm_b2110              MACH_STM_B2110          STM_B2110               4547
+elux_hathor            MACH_ELUX_HATHOR        ELUX_HATHOR             4548
+helios_v7              MACH_HELIOS_V7          HELIOS_V7               4549
+xc10v1                 MACH_XC10V1             XC10V1                  4550
+cp2u                   MACH_CP2U               CP2U                    4551
+iap_f                  MACH_IAP_F              IAP_F                   4552
+iap_g                  MACH_IAP_G              IAP_G                   4553
+aae                    MACH_AAE                AAE                     4554
+pegasus                        MACH_PEGASUS            PEGASUS                 4555
+cygnus                 MACH_CYGNUS             CYGNUS                  4556
+centaurus              MACH_CENTAURUS          CENTAURUS               4557
+msm8930_qrd8930                MACH_MSM8930_QRD8930    MSM8930_QRD8930         4558
+quby_tim               MACH_QUBY_TIM           QUBY_TIM                4559
+zedi3250a              MACH_ZEDI3250A          ZEDI3250A               4560
+grus                   MACH_GRUS               GRUS                    4561
+apollo3                        MACH_APOLLO3            APOLLO3                 4562
+cowon_r7               MACH_COWON_R7           COWON_R7                4563
+tonga3                 MACH_TONGA3             TONGA3                  4564
+p535                   MACH_P535               P535                    4565
+sa3874i                        MACH_SA3874I            SA3874I                 4566
+mx6_navico_com         MACH_MX6_NAVICO_COM     MX6_NAVICO_COM          4567
+proxmobil2             MACH_PROXMOBIL2         PROXMOBIL2              4568
+ubinux1                        MACH_UBINUX1            UBINUX1                 4569
+istos                  MACH_ISTOS              ISTOS                   4570
+benvolio4              MACH_BENVOLIO4          BENVOLIO4               4571
+eco5_bx2               MACH_ECO5_BX2           ECO5_BX2                4572
+eukrea_cpuimx28sd      MACH_EUKREA_CPUIMX28SD  EUKREA_CPUIMX28SD       4573
+domotab                        MACH_DOMOTAB            DOMOTAB                 4574
+pfla03                 MACH_PFLA03             PFLA03                  4575
index cf60d0a9f1767ca8d8ba8e3e810526539022313f..fc6483f83ccca748a3dc4319bb1ee849f8435654 100644 (file)
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
 #define readw_be                       __raw_readw
 #define readl_be                       __raw_readl
 
+#define writeb_relaxed                 writeb
+#define writew_relaxed                 writew
+#define writel_relaxed                 writel
+
 #define writeb_be                      __raw_writeb
 #define writew_be                      __raw_writew
 #define writel_be                      __raw_writel
index 256c5bf0adb7a87cb0429bc02c435f3cfecbd3b5..04d69c4a5ac2d6326839d8bcf23342ff509926ce 100644 (file)
@@ -304,7 +304,7 @@ syscall_exit_work:
        subi    r12,r12,TI_FLAGS
 
 4:     /* Anything else left to do? */
-       SET_DEFAULT_THREAD_PPR(r3, r9)          /* Set thread.ppr = 3 */
+       SET_DEFAULT_THREAD_PPR(r3, r10)         /* Set thread.ppr = 3 */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     .ret_from_except_lite
 
@@ -657,7 +657,7 @@ resume_kernel:
        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis     r11,_TIF_EMULATE_STACK_STORE@h
        addi    r5,r9,TI_FLAGS
-       ldarx   r4,0,r5
+0:     ldarx   r4,0,r5
        andc    r4,r4,r11
        stdcx.  r4,0,r5
        bne-    0b
index 59dd545fdde188761e140c2f6969b9b9e348662b..16e77a81ab4f29d5090b3f65b600d738fe6ed9cf 100644 (file)
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
+#endif
        /* We may as well turn on VSX too since all the state is restored now */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;
index 3acb28e245b411d65ebcc8c68bfeec802e7f4737..95068bf569adc17cf51d7e32bd650daaba234b17 100644 (file)
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                do_load_up_transact_fpu(&current->thread);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                regs->msr |= MSR_VEC;
        }
+#endif
 
        return 0;
 }
index 995f8543cb57ed869b603fee7c5d178c1d1af066..c1794286098ca2f78c5e32f4c01b0891ca04fc78 100644 (file)
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                do_load_up_transact_fpu(&current->thread);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
+#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                regs->msr |= MSR_VEC;
        }
+#endif
 
        return err;
 }
index 84dbace657ce197fb7a71d17e61042e87fb271c8..2da67e7a16d58650e10a0b6113b705bd88a6f2d3 100644 (file)
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
        or      r5, r6, r5                      /* Set MSR.FP+.VSX/.VEC */
        mtmsr   r5
 
+#ifdef CONFIG_ALTIVEC
        /* FP and VEC registers:  These are recheckpointed from thread.fpr[]
         * and thread.vr[] respectively.  The thread.transact_fpr[] version
         * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
        REST_32VRS(0, r5, r3)                   /* r5 scratch, r3 THREAD ptr */
        ld      r5, THREAD_VRSAVE(r3)
        mtspr   SPRN_VRSAVE, r5
+#endif
 
 dont_restore_vec:
        andi.   r0, r4, MSR_FP
index 41cefd43655ff2616478778295072a1509d0f1ad..33db48a8ce241e1ee9ee0bce50bf991bb550b332 100644 (file)
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID         (1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP                (1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0          (1 << 2)
 
 struct tlbe_ref {
-       pfn_t pfn;
-       unsigned int flags; /* E500_TLB_* */
+       pfn_t pfn;              /* valid only for TLB0, except briefly */
+       unsigned int flags;     /* E500_TLB_* */
 };
 
 struct tlbe_priv {
-       struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+       struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 
        unsigned int gtlb_nv[E500_TLB_NUM];
 
-       /*
-        * information associated with each host TLB entry --
-        * TLB1 only for now.  If/when guest TLB1 entries can be
-        * mapped with host TLB0, this will be used for that too.
-        *
-        * We don't want to use this for guest TLB0 because then we'd
-        * have the overhead of doing the translation again even if
-        * the entry is still in the guest TLB (e.g. we swapped out
-        * and back, and our host TLB entries got evicted).
-        */
-       struct tlbe_ref *tlb_refs[E500_TLB_NUM];
        unsigned int host_tlb1_nv;
 
        u32 svr;
index a222edfb9a9bce72e7d642b05853398b493bc679..1c6a9d729df4a26ca3ad6c9f1c94008d681f7d42 100644 (file)
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
        /* Don't bother with unmapped entries */
-       if (!(ref->flags & E500_TLB_VALID))
-               return;
+       if (!(ref->flags & E500_TLB_VALID)) {
+               WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+                    "%s: flags %x\n", __func__, ref->flags);
+               WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+       }
 
        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         pfn_t pfn)
 {
        ref->pfn = pfn;
-       ref->flags = E500_TLB_VALID;
+       ref->flags |= E500_TLB_VALID;
 
        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
        if (ref->flags & E500_TLB_VALID) {
+               /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-       int tlbsel = 0;
-       int i;
-
-       for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-               struct tlbe_ref *ref =
-                       &vcpu_e500->gtlb_priv[tlbsel][i].ref;
-               kvmppc_e500_ref_release(ref);
-       }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       int stlbsel = 1;
+       int tlbsel;
        int i;
 
-       kvmppc_e500_tlbil_all(vcpu_e500);
-
-       for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-               struct tlbe_ref *ref =
-                       &vcpu_e500->tlb_refs[stlbsel][i];
-               kvmppc_e500_ref_release(ref);
+       for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+               for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+                       struct tlbe_ref *ref =
+                               &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+                       kvmppc_e500_ref_release(ref);
+               }
        }
-
-       clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       clear_tlb_refs(vcpu_e500);
+       kvmppc_e500_tlbil_all(vcpu_e500);
+       clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
 }
 
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }
 
-       /* Drop old ref and setup new one. */
-       kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;
 
-       vcpu_e500->tlb_refs[1][sesel] = *ref;
-       vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-       vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-               unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+               unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }
-       vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+       vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+       vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+       vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+       WARN_ON(!(ref->flags & E500_TLB_VALID));
 
        return sesel;
 }
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-       struct tlbe_ref ref;
+       struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;
 
-       ref.flags = 0;
        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-                                  &ref);
+                                  ref);
        if (r)
                return r;
 
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        }
 
        /* Otherwise map into TLB1 */
-       sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+       sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
        return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-               /* Triggers after clear_tlb_refs or on initial mapping */
+               /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;
 
-       vcpu_e500->tlb_refs[0] =
-               kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-                       GFP_KERNEL);
-       if (!vcpu_e500->tlb_refs[0])
-               goto err;
-
-       vcpu_e500->tlb_refs[1] =
-               kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-                       GFP_KERNEL);
-       if (!vcpu_e500->tlb_refs[1])
-               goto err;
-
        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
-               goto err;
+               return -EINVAL;
 
        return 0;
-
-err:
-       kfree(vcpu_e500->tlb_refs[0]);
-       kfree(vcpu_e500->tlb_refs[1]);
-       return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        kfree(vcpu_e500->h2g_tlb1_rmap);
-       kfree(vcpu_e500->tlb_refs[0]);
-       kfree(vcpu_e500->tlb_refs[1]);
 }
index 1f89d26e65fb72534e130ed755bfe3874740316f..2f4baa074b2ebf0bf9cfe497055afbe9460ec4a5 100644 (file)
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
        mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
-       if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+       if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+           __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
                kvmppc_e500_tlbil_all(vcpu_e500);
+               __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+       }
 
        kvmppc_load_guest_fp(vcpu);
 }
index 27cb32185ce1d950e5a6097198a1864a69e6594b..379d96e2105ea1d60f79a4cd881593a188a61c01 100644 (file)
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 #define ioremap_nocache(addr, size)    ioremap(addr, size)
 #define ioremap_wc                     ioremap_nocache
 
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)               \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
        return (void __iomem *) offset;
index 4a5443118cfb039d4b5e53f63b7470c292c4a431..3cb47cf02530b6f390af7ac65c0ac92f89cf275c 100644 (file)
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
         (((unsigned long)(vaddr)) &zero_page_mask))))
 #define __HAVE_COLOR_ZERO_PAGE
 
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)               \
+       remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #endif /* !__ASSEMBLY__ */
 
 /*
index e26d430ce2fdb94b7c85281db75367acf41e2321..ff18e3cfb6b1ffe3e75df94fdd68319b912ab1d8 100644 (file)
@@ -2,11 +2,16 @@
 
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += div64.h
+generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += local64.h
+generic-y += mutex.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += types.h
 generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644 (file)
index 1a642b8..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644 (file)
index 458c1f7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
index 08fcce90316b36654dea14af5525a94a0cded6e8..7619f2f792aff549905ca49d1a70b3cd7514979c 100644 (file)
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h
deleted file mode 100644 (file)
index f90d61c..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
index b73da3c5f10a9140ba730836d3e496af3e2358c6..3c8917f054dee450fcc48e7f50d189b88bc19401 100644 (file)
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                       unsigned long, unsigned long);
 
 void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
 
 /*
  *     General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
 void sun4d_init_smp(void);
 
 void smp_callin(void);
-void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 
 void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id()         (current_thread_info()->cpu)
 
-#define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)            cpu_data(__cpu).counter
-
 void smp_setup_cpu_possible_map(void);
 
 #endif /* !(__ASSEMBLY__) */
index cad36f56fa03e6605797c4dca13f5127048b47d7..c7de3323819c5389a8c1855f09aa542cd67f4d41 100644 (file)
@@ -18,8 +18,7 @@ do {                                          \
         * and 2 stores in this critical code path.  -DaveM
         */
 #define switch_to(prev, next, last)                                    \
-do {   flush_tlb_pending();                                            \
-       save_and_clear_fpu();                                           \
+do {   save_and_clear_fpu();                                           \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
index 2ef463494153a65adec7c7a59a37c0095fda2649..f0d6a9700f4c8351e20be4743d9782c590b9e016 100644 (file)
 struct tlb_batch {
        struct mm_struct *mm;
        unsigned long tlb_nr;
+       unsigned long active;
        unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                 unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end) \
-       do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)       flush_tlb_pending()
-#define flush_tlb_mm(mm)               flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
 extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {        flush_tsb_kernel_range(start,end); \
        __flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {   flush_tsb_kernel_range(start,end); \
        smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+       smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
index ce175aff71b75e29fdf65cf70d8a17f2a07c5d2a..b5843ee09fb53a6e4438b29140adbd34d9c887c1 100644 (file)
@@ -44,7 +44,6 @@ header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
 header-y += traps.h
-header-y += types.h
 header-y += uctx.h
 header-y += unistd.h
 header-y += utrap.h
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644 (file)
index 383d156..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#if defined(__sparc__)
-
-#include <asm-generic/int-ll64.h>
-
-#endif /* defined(__sparc__) */
-
-#endif /* defined(_SPARC_TYPES_H) */
index 537eb66abd0654054aef7402ffaf857ad3971a85..ca64d2a86ec03e4189b88367906456e99de77a14 100644 (file)
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ local_flush_and_out:
        put_cpu();
 }
 
+struct tlb_pending_info {
+       unsigned long ctx;
+       unsigned long nr;
+       unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+       struct tlb_pending_info *t = info;
+
+       __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
        u32 ctx = CTX_HWBITS(mm->context);
+       struct tlb_pending_info info;
        int cpu = get_cpu();
 
+       info.ctx = ctx;
+       info.nr = nr;
+       info.vaddrs = vaddrs;
+
        if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
                cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
        else
-               smp_cross_call_masked(&xcall_flush_tlb_pending,
-                                     ctx, nr, (unsigned long) vaddrs,
-                                     mm_cpumask(mm));
+               smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+                                      &info, 1);
 
        __flush_tlb_pending(ctx, nr, vaddrs);
 
        put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long context = CTX_HWBITS(mm->context);
+       int cpu = get_cpu();
+
+       if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+       else
+               smp_cross_call_masked(&xcall_flush_tlb_page,
+                                     context, vaddr, 0,
+                                     mm_cpumask(mm));
+       __flush_tlb_page(context, vaddr);
+
+       put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        start &= PAGE_MASK;
index 48d00e72ce15ba41304f8390ab29324402f052b2..8ec4e9c0251a3e414acf58327b0fd167890caa95 100644 (file)
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
 
 void bit_map_init(struct bit_map *t, unsigned long *map, int size)
 {
-
-       if ((size & 07) != 0)
-               BUG();
-       memset(map, 0, size>>3);
-
+       bitmap_zero(map, size);
        memset(t, 0, sizeof *t);
        spin_lock_init(&t->lock);
        t->map = map;
index 0f4f7191fbbad93d6600ac409f6baae24c911d5e..28f96f27c7683e1201d5ca6e53409e391594aff4 100644 (file)
@@ -34,7 +34,7 @@
 #define IOMMU_RNGE     IOMMU_RNGE_256MB
 #define IOMMU_START    0xF0000000
 #define IOMMU_WINSIZE  (256*1024*1024U)
-#define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 265KB */
+#define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
 #define IOMMU_ORDER    6                               /* 4096 * (1<<6) */
 
 /* srmmu.c */
index c38bb72e3e80e4c122ea469a4edaba8e18cc3939..036c2797dece1c6443894b9e3ce95c066e4142fe 100644 (file)
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
                SRMMU_NOCACHE_ALIGN_MAX, 0UL);
        memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
-       srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+       srmmu_nocache_bitmap =
+               __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+                               SMP_CACHE_BYTES, 0UL);
        bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
        srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
index ba6ae7ffdc2c9d5d1e3bbbb8d2af3b68a2f2572e..272aa4f7657e2c3bbba5772809e8798c657a32c8 100644 (file)
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+       struct mm_struct *mm = tb->mm;
 
-       if (tb->tlb_nr) {
-               flush_tsb_user(tb);
+       if (!tb->tlb_nr)
+               goto out;
 
-               if (CTX_VALID(tb->mm->context)) {
+       flush_tsb_user(tb);
+
+       if (CTX_VALID(mm->context)) {
+               if (tb->tlb_nr == 1) {
+                       global_flush_tlb_page(mm, tb->vaddrs[0]);
+               } else {
 #ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
                                            tb->tlb_nr, &tb->vaddrs[0]);
 #endif
                }
-               tb->tlb_nr = 0;
        }
 
+       tb->tlb_nr = 0;
+
+out:
        put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       if (tb->tlb_nr)
+               flush_tlb_pending();
+       tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                nr = 0;
        }
 
+       if (!tb->active) {
+               global_flush_tlb_page(mm, vaddr);
+               flush_tsb_user_page(mm, vaddr);
+               return;
+       }
+
        if (nr == 0)
                tb->mm = mm;
 
index 428982b9becfe267b33ad2745efb36b5dc861a11..2cc3bce5ee914a158a16960c4ece028cc959b83a 100644 (file)
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
        }
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-                           unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+                                 unsigned long hash_shift,
+                                 unsigned long nentries)
 {
-       unsigned long i;
+       unsigned long tag, ent, hash;
 
-       for (i = 0; i < tb->tlb_nr; i++) {
-               unsigned long v = tb->vaddrs[i];
-               unsigned long tag, ent, hash;
+       v &= ~0x1UL;
+       hash = tsb_hash(v, hash_shift, nentries);
+       ent = tsb + (hash * sizeof(struct tsb));
+       tag = (v >> 22UL);
 
-               v &= ~0x1UL;
+       tsb_flush(ent, tag);
+}
 
-               hash = tsb_hash(v, hash_shift, nentries);
-               ent = tsb + (hash * sizeof(struct tsb));
-               tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+                           unsigned long tsb, unsigned long nentries)
+{
+       unsigned long i;
 
-               tsb_flush(ent, tag);
-       }
+       for (i = 0; i < tb->tlb_nr; i++)
+               __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
        spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long nentries, base, flags;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+       nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+       if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+               base = __pa(base);
+       __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+               base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+               nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+               if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+                       base = __pa(base);
+               __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+       }
+#endif
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE       HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE      HV_PGSZ_MASK_8K
 
index f8e13d421fcbf415dd49a50bc2ed4aaf2b3b6494..432aa0cb1b38caa75076938700c60761699dc920 100644 (file)
@@ -52,6 +52,33 @@ __flush_tlb_mm:              /* 18 insns */
        nop
        nop
 
+       .align          32
+       .globl          __flush_tlb_page
+__flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, %pstate
+       mov             SECONDARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       andn            %o1, 1, %o3
+       be,pn           %icc, 1f
+        or             %o3, 0x10, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       retl
+        wrpr           %g7, 0x0, %pstate
+       nop
+       nop
+       nop
+       nop
+
        .align          32
        .globl          __flush_tlb_pending
 __flush_tlb_pending:   /* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
        retl
         wrpr           %g7, 0x0, %pstate
 
+__cheetah_flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, 0x0, %pstate
+       wrpr            %g0, 1, %tl
+       mov             PRIMARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+       sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+       or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       be,pn           %icc, 1f
+        andn           %o1, 1, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP       
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       wrpr            %g0, 0, %tl
+       retl
+        wrpr           %g7, 0x0, %pstate
+
 __cheetah_flush_tlb_pending:   /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
        retl
         nop
 
+__hypervisor_flush_tlb_page: /* 11 insns */
+       /* %o0 = context, %o1 = vaddr */
+       mov             %o0, %g2
+       mov             %o1, %o0              /* ARG0: vaddr + IMMU-bit */
+       mov             %g2, %o1              /* ARG1: mmu context */
+       mov             HV_MMU_ALL, %o2       /* ARG2: flags */
+       srlx            %o0, PAGE_SHIFT, %o0
+       sllx            %o0, PAGE_SHIFT, %o0
+       ta              HV_MMU_UNMAP_ADDR_TRAP
+       brnz,pn         %o0, __hypervisor_tlb_tl0_error
+        mov            HV_MMU_UNMAP_ADDR_TRAP, %o1
+       retl
+        nop
+
 __hypervisor_flush_tlb_pending: /* 16 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx            %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
        call            tlb_patch_one
         mov            19, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__cheetah_flush_tlb_page), %o1
+       or              %o1, %lo(__cheetah_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            22, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm:        /* 21 insns */
        nop
        nop
 
-       .globl          xcall_flush_tlb_pending
-xcall_flush_tlb_pending:       /* 21 insns */
-       /* %g5=context, %g1=nr, %g7=vaddrs[] */
-       sllx            %g1, 3, %g1
+       .globl          xcall_flush_tlb_page
+xcall_flush_tlb_page:  /* 17 insns */
+       /* %g5=context, %g1=vaddr */
        mov             PRIMARY_CONTEXT, %g4
        ldxa            [%g4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending:  /* 21 insns */
        or              %g5, %g4, %g5
        mov             PRIMARY_CONTEXT, %g4
        stxa            %g5, [%g4] ASI_DMMU
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %g5
-       andcc           %g5, 0x1, %g0
+       andcc           %g1, 0x1, %g0
        be,pn           %icc, 2f
-
-        andn           %g5, 0x1, %g5
+        andn           %g1, 0x1, %g5
        stxa            %g0, [%g5] ASI_IMMU_DEMAP
 2:     stxa            %g0, [%g5] ASI_DMMU_DEMAP
        membar          #Sync
-       brnz,pt         %g1, 1b
-        nop
        stxa            %g2, [%g4] ASI_DMMU
        retry
        nop
+       nop
 
        .globl          xcall_flush_tlb_kernel_range
 xcall_flush_tlb_kernel_range:  /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
        membar          #Sync
        retry
 
-       .globl          __hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
-       /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
-       sllx            %g1, 3, %g1
+       .globl          __hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+       /* %g5=ctx, %g1=vaddr */
        mov             %o0, %g2
        mov             %o1, %g3
        mov             %o2, %g4
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %o0        /* ARG0: virtual address */
+       mov             %g1, %o0                /* ARG0: virtual address */
        mov             %g5, %o1                /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx            %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn       %o0, __hypervisor_tlb_xcall_error
         mov            %o0, %g5
-       brnz,pt         %g1, 1b
-        nop
        mov             %g2, %o0
        mov             %g3, %o1
        mov             %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            10, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            11, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            21, %o2
 
-       sethi           %hi(xcall_flush_tlb_pending), %o0
-       or              %o0, %lo(xcall_flush_tlb_pending), %o0
-       sethi           %hi(__hypervisor_xcall_flush_tlb_pending), %o1
-       or              %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+       sethi           %hi(xcall_flush_tlb_page), %o0
+       or              %o0, %lo(xcall_flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_xcall_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
        call            tlb_patch_one
-        mov            21, %o2
+        mov            17, %o2
 
        sethi           %hi(xcall_flush_tlb_kernel_range), %o0
        or              %o0, %lo(xcall_flush_tlb_kernel_range), %o0
index 70c0f3da0476a35ba93546689bb4b06b5f576bba..15b5cef4aa3857a386cb77cffd2ba74243b532c7 100644 (file)
@@ -1549,6 +1549,7 @@ config X86_SMAP
 config EFI
        bool "EFI runtime service support"
        depends on ACPI
+       select UCS2_STRING
        ---help---
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
index c205035a6b96b836ef683ddf8592dbc9c82200e6..8615f7581820d44ef7cd652c8ef2662310ac5122 100644 (file)
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
        *size = len;
 }
 
+static efi_status_t setup_efi_vars(struct boot_params *params)
+{
+       struct setup_data *data;
+       struct efi_var_bootdata *efidata;
+       u64 store_size, remaining_size, var_size;
+       efi_status_t status;
+
+       if (!sys_table->runtime->query_variable_info)
+               return EFI_UNSUPPORTED;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       status = efi_call_phys4(sys_table->runtime->query_variable_info,
+                               EFI_VARIABLE_NON_VOLATILE |
+                               EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                               EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
+                               &remaining_size, &var_size);
+
+       if (status != EFI_SUCCESS)
+               return status;
+
+       status = efi_call_phys3(sys_table->boottime->allocate_pool,
+                               EFI_LOADER_DATA, sizeof(*efidata), &efidata);
+
+       if (status != EFI_SUCCESS)
+               return status;
+
+       efidata->data.type = SETUP_EFI_VARS;
+       efidata->data.len = sizeof(struct efi_var_bootdata) -
+               sizeof(struct setup_data);
+       efidata->data.next = 0;
+       efidata->store_size = store_size;
+       efidata->remaining_size = remaining_size;
+       efidata->max_var_size = var_size;
+
+       if (data)
+               data->next = (unsigned long)efidata;
+       else
+               params->hdr.setup_data = (unsigned long)efidata;
+
+}
+
 static efi_status_t setup_efi_pci(struct boot_params *params)
 {
        efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
 
        setup_graphics(boot_params);
 
+       setup_efi_vars(boot_params);
+
        setup_efi_pci(boot_params);
 
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
index 60c89f30c727458df128543a6302e95aeede8758..2fb5d5884e2331b7d6d299621bfb5c91fa701bf2 100644 (file)
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
+struct efi_var_bootdata {
+       struct setup_data data;
+       u64 store_size;
+       u64 remaining_size;
+       u64 max_var_size;
+};
+
 #ifdef CONFIG_EFI
 
 static inline bool efi_is_native(void)
index c15ddaf907107134d6cd2f8d86f554e510a6f848..08744242b8d24c9111d2275ff6d84752d725584e 100644 (file)
@@ -6,6 +6,7 @@
 #define SETUP_E820_EXT                 1
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
+#define SETUP_EFI_VARS                 4
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
index a7d26d83fb700b254eca03d9ff5e8f6d56f7a0c5..8f4be53ea04b84f4eaa7a78a4bcd6e6e625c9afc 100644 (file)
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;
 
-       /*
-        * Xen emulates Hyper-V to support enlightened Windows.
-        * Check to see first if we are on a Xen Hypervisor.
-        */
-       if (xen_cpuid_base())
-               return false;
-
        cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
              &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
 
        if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
                clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
-       /*
-        * Setup the IDT for hypervisor callback.
-        */
-       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
 
 void hv_register_vmbus_handler(int irq, irq_handler_t handler)
 {
+       /*
+        * Setup the IDT for hypervisor callback.
+        */
+       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
        vmbus_irq = irq;
        vmbus_isr = handler;
 }
index dab7580c47aee2e71e501afbfa94c17e2790777d..cc45deb791b01d103c01cbc527f8bfd023d51879 100644 (file)
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 };
 
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+       EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        EVENT_EXTRA_END
 };
 
@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 45)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_ivb_event_constraints;
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 62)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
index 577db8417d15b41fd71cce1f0bc813ba959c5b70..833d51d6ee065b908fa70ac1075a3acf6d2721a4 100644 (file)
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
        u32 eax = 0x00000000;
        u32 ebx, ecx = 0, edx;
 
-       if (!have_cpuid_p())
-               return X86_VENDOR_UNKNOWN;
-
        native_cpuid(&eax, &ebx, &ecx, &edx);
 
        if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
        return X86_VENDOR_UNKNOWN;
 }
 
+static int __cpuinit x86_family(void)
+{
+       u32 eax = 0x00000001;
+       u32 ebx, ecx = 0, edx;
+       int x86;
+
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       x86 = (eax >> 8) & 0xf;
+       if (x86 == 15)
+               x86 += (eax >> 20) & 0xff;
+
+       return x86;
+}
+
 void __init load_ucode_bsp(void)
 {
-       int vendor = x86_vendor();
+       int vendor, x86;
+
+       if (!have_cpuid_p())
+               return;
 
-       if (vendor == X86_VENDOR_INTEL)
+       vendor = x86_vendor();
+       x86 = x86_family();
+
+       if (vendor == X86_VENDOR_INTEL && x86 >= 6)
                load_ucode_intel_bsp();
 }
 
 void __cpuinit load_ucode_ap(void)
 {
-       int vendor = x86_vendor();
+       int vendor, x86;
+
+       if (!have_cpuid_p())
+               return;
+
+       vendor = x86_vendor();
+       x86 = x86_family();
 
-       if (vendor == X86_VENDOR_INTEL)
+       if (vendor == X86_VENDOR_INTEL && x86 >= 6)
                load_ucode_intel_ap();
 }
index 90d8cc930f5ed134735f7697e016a83e5657dcb8..fae9134a2de91f7b420fb554bb33f356de661f8f 100644 (file)
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 /*
  * Keep the crash kernel below this limit.  On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64bit, old kexec-tools need to under 896MiB.
  */
 #ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_LOW_MAX     (512 << 20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX    (512 << 20)
 #else
-# define CRASH_KERNEL_ADDR_MAX MAXMEM
+# define CRASH_KERNEL_ADDR_LOW_MAX     (896UL<<20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX    MAXMEM
 #endif
 
 static void __init reserve_crashkernel_low(void)
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
        unsigned long long low_base = 0, low_size = 0;
        unsigned long total_low_mem;
        unsigned long long base;
+       bool auto_set = false;
        int ret;
 
        total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+       /* crashkernel=Y,low */
        ret = parse_crashkernel_low(boot_command_line, total_low_mem,
                                                &low_size, &base);
-       if (ret != 0 || low_size <= 0)
-               return;
+       if (ret != 0) {
+               /*
+                * two parts from lib/swiotlb.c:
+                *      swiotlb size: user specified with swiotlb= or default.
+                *      swiotlb overflow buffer: now is hardcoded to 32k.
+                *              We round it to 8M for other buffers that
+                *              may need to stay low too.
+                */
+               low_size = swiotlb_size_or_default() + (8UL<<20);
+               auto_set = true;
+       } else {
+               /* passed with crashkernel=0,low ? */
+               if (!low_size)
+                       return;
+       }
 
        low_base = memblock_find_in_range(low_size, (1ULL<<32),
                                        low_size, alignment);
 
        if (!low_base) {
-               pr_info("crashkernel low reservation failed - No suitable area found.\n");
+               if (!auto_set)
+                       pr_info("crashkernel low reservation failed - No suitable area found.\n");
 
                return;
        }
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
        const unsigned long long alignment = 16<<20;    /* 16M */
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
+       bool high = false;
        int ret;
 
        total_mem = memblock_phys_mem_size();
 
+       /* crashkernel=XM */
        ret = parse_crashkernel(boot_command_line, total_mem,
                        &crash_size, &crash_base);
-       if (ret != 0 || crash_size <= 0)
-               return;
+       if (ret != 0 || crash_size <= 0) {
+               /* crashkernel=X,high */
+               ret = parse_crashkernel_high(boot_command_line, total_mem,
+                               &crash_size, &crash_base);
+               if (ret != 0 || crash_size <= 0)
+                       return;
+               high = true;
+       }
 
        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
                 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
                 */
                crash_base = memblock_find_in_range(alignment,
-                              CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+                                       high ? CRASH_KERNEL_ADDR_HIGH_MAX :
+                                              CRASH_KERNEL_ADDR_LOW_MAX,
+                                       crash_size, alignment);
 
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
index 5f2ecaf3f9d8609ccdc7a56e4d232fd13bdd8dcc..e4a86a677ce163ec5f911fb14db65590d813aee1 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/bcd.h>
+#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/efi.h>
 
 #define EFI_DEBUG      1
 
+/*
+ * There's some additional metadata associated with each
+ * variable. Intel's reference implementation is 60 bytes - bump that
+ * to account for potential alignment constraints
+ */
+#define VAR_METADATA_SIZE 64
+
 struct efi __read_mostly efi = {
        .mps        = EFI_INVALID_TABLE_ADDR,
        .acpi       = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +77,13 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
+static u64 efi_var_store_size;
+static u64 efi_var_remaining_size;
+static u64 efi_var_max_var_size;
+static u64 boot_used_size;
+static u64 boot_var_size;
+static u64 active_size;
+
 unsigned long x86_efi_facility;
 
 /*
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
 }
 early_param("add_efi_memmap", setup_add_efi_memmap);
 
+static bool efi_no_storage_paranoia;
+
+static int __init setup_storage_paranoia(char *arg)
+{
+       efi_no_storage_paranoia = true;
+       return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
 
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
                                               efi_char16_t *name,
                                               efi_guid_t *vendor)
 {
-       return efi_call_virt3(get_next_variable,
-                             name_size, name, vendor);
+       efi_status_t status;
+       static bool finished = false;
+       static u64 var_size;
+
+       status = efi_call_virt3(get_next_variable,
+                               name_size, name, vendor);
+
+       if (status == EFI_NOT_FOUND) {
+               finished = true;
+               if (var_size < boot_used_size) {
+                       boot_var_size = boot_used_size - var_size;
+                       active_size += boot_var_size;
+               } else {
+                       printk(KERN_WARNING FW_BUG  "efi: Inconsistent initial sizes\n");
+               }
+       }
+
+       if (boot_used_size && !finished) {
+               unsigned long size;
+               u32 attr;
+               efi_status_t s;
+               void *tmp;
+
+               s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
+
+               if (s != EFI_BUFFER_TOO_SMALL || !size)
+                       return status;
+
+               tmp = kmalloc(size, GFP_ATOMIC);
+
+               if (!tmp)
+                       return status;
+
+               s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
+
+               if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
+                       var_size += size;
+                       var_size += ucs2_strsize(name, 1024);
+                       active_size += size;
+                       active_size += VAR_METADATA_SIZE;
+                       active_size += ucs2_strsize(name, 1024);
+               }
+
+               kfree(tmp);
+       }
+
+       return status;
 }
 
 static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
                                          unsigned long data_size,
                                          void *data)
 {
-       return efi_call_virt5(set_variable,
-                             name, vendor, attr,
-                             data_size, data);
+       efi_status_t status;
+       u32 orig_attr = 0;
+       unsigned long orig_size = 0;
+
+       status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
+                                      NULL);
+
+       if (status != EFI_BUFFER_TOO_SMALL)
+               orig_size = 0;
+
+       status = efi_call_virt5(set_variable,
+                               name, vendor, attr,
+                               data_size, data);
+
+       if (status == EFI_SUCCESS) {
+               if (orig_size) {
+                       active_size -= orig_size;
+                       active_size -= ucs2_strsize(name, 1024);
+                       active_size -= VAR_METADATA_SIZE;
+               }
+               if (data_size) {
+                       active_size += data_size;
+                       active_size += ucs2_strsize(name, 1024);
+                       active_size += VAR_METADATA_SIZE;
+               }
+       }
+
+       return status;
 }
 
 static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -682,6 +776,9 @@ void __init efi_init(void)
        char vendor[100] = "unknown";
        int i = 0;
        void *tmp;
+       struct setup_data *data;
+       struct efi_var_bootdata *efi_var_data;
+       u64 pa_data;
 
 #ifdef CONFIG_X86_32
        if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +796,22 @@ void __init efi_init(void)
        if (efi_systab_init(efi_phys.systab))
                return;
 
+       pa_data = boot_params.hdr.setup_data;
+       while (pa_data) {
+               data = early_ioremap(pa_data, sizeof(*efi_var_data));
+               if (data->type == SETUP_EFI_VARS) {
+                       efi_var_data = (struct efi_var_bootdata *)data;
+
+                       efi_var_store_size = efi_var_data->store_size;
+                       efi_var_remaining_size = efi_var_data->remaining_size;
+                       efi_var_max_var_size = efi_var_data->max_var_size;
+               }
+               pa_data = data->next;
+               early_iounmap(data, sizeof(*efi_var_data));
+       }
+
+       boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
        set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
        /*
@@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
        }
        return 0;
 }
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+       efi_status_t status;
+       u64 storage_size, remaining_size, max_size;
+
+       status = efi.query_variable_info(attributes, &storage_size,
+                                        &remaining_size, &max_size);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       if (!max_size && remaining_size > size)
+               printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+                           " is returning MaxVariableSize=0\n");
+       /*
+        * Some firmware implementations refuse to boot if there's insufficient
+        * space in the variable store. We account for that by refusing the
+        * write if permitting it would reduce the available space to under
+        * 50%. However, some firmware won't reclaim variable space until
+        * after the used (not merely the actively used) space drops below
+        * a threshold. We can approximate that case with the value calculated
+        * above. If both the firmware and our calculations indicate that the
+        * available space would drop below 50%, refuse the write.
+        */
+
+       if (!storage_size || size > remaining_size ||
+           (max_size && size > max_size))
+               return EFI_OUT_OF_RESOURCES;
+
+       if (!efi_no_storage_paranoia &&
+           ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+            (remaining_size - size < storage_size / 2)))
+               return EFI_OUT_OF_RESOURCES;
+
+       return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
index 074b758efc42cf116d61f4755f0107f491890980..7c288358a745ad2312e93080201e341cf2b69207 100644 (file)
@@ -39,6 +39,7 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
index f556f8a8b3f9b476949c6133f39778c49502e380..b7b7a88d9f689cdd3697ac8074de1887e7d50c15 100644 (file)
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
        struct rbd_device *rbd_dev = img_request->rbd_dev;
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
+       struct rbd_obj_request *next_obj_request;
 
        dout("%s: img %p\n", __func__, img_request);
-       for_each_obj_request(img_request, obj_request) {
+       for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
                int ret;
 
                obj_request->callback = rbd_img_obj_callback;
index e3f9a99b8522e41dd2a4005142ea396838f26bbf..d784650d14f0d113c5d852e47d9dd676741e3b4f 100644 (file)
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
        struct hpet_dev *devp;
        unsigned long addr;
 
-       if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
-               return -EINVAL;
-
        devp = file->private_data;
        addr = devp->hd_hpets->hp_hpet_phys;
 
        if (addr & (PAGE_SIZE - 1))
                return -ENOSYS;
 
-       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
-                                       PAGE_SIZE, vma->vm_page_prot)) {
-               printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-                       __func__);
-               return -EAGAIN;
-       }
-
-       return 0;
+       return vm_iomap_memory(vma, addr, PAGE_SIZE);
 #else
        return -ENOSYS;
 #endif
index 6e13f262139a89acabf4c512ce8461d62e810a61..88cfc61329d20fb137a46ff852569bf8b567c60a 100644 (file)
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 
        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
-       BUG_ON(atc_chan_is_enabled(atchan));
-
        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 {
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
+       if (atc_chan_is_enabled(atchan))
+               return;
+
        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)
                return;
 
        spin_lock_irqsave(&atchan->lock, flags);
-       if (!atc_chan_is_enabled(atchan)) {
-               atc_advance_work(atchan);
-       }
+       atc_advance_work(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
index 42c759a4d047f6754ddf8676a390d8d0dcdffafb..3e532002e4d1c01a3d21cd84c3a4a5e22e5e2440 100644 (file)
@@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP
 config EFI_VARS
        tristate "EFI Variable Support via sysfs"
        depends on EFI
+       select UCS2_STRING
        default n
        help
          If you say Y here, you are able to get EFI (Extensible Firmware
index 7acafb80fd4c79b19b1c53551bb5e7e8612af707..182ce9471175870f3ce4c0ab14ad81ddcbb1f197 100644 (file)
@@ -80,6 +80,7 @@
 #include <linux/slab.h>
 #include <linux/pstore.h>
 #include <linux/ctype.h>
+#include <linux/ucs2_string.h>
 
 #include <linux/fs.h>
 #include <linux/ramfs.h>
@@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
 static bool efivar_wq_enabled = true;
 
-/* Return the number of unicode characters in data */
-static unsigned long
-utf16_strnlen(efi_char16_t *s, size_t maxlength)
-{
-       unsigned long length = 0;
-
-       while (*s++ != 0 && length < maxlength)
-               length++;
-       return length;
-}
-
-static inline unsigned long
-utf16_strlen(efi_char16_t *s)
-{
-       return utf16_strnlen(s, ~0UL);
-}
-
-/*
- * Return the number of bytes is the length of this string
- * Note: this is NOT the same as the number of unicode characters
- */
-static inline unsigned long
-utf16_strsize(efi_char16_t *data, unsigned long maxlength)
-{
-       return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
-}
-
-static inline int
-utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
-{
-       while (1) {
-               if (len == 0)
-                       return 0;
-               if (*a < *b)
-                       return -1;
-               if (*a > *b)
-                       return 1;
-               if (*a == 0) /* implies *b == 0 */
-                       return 0;
-               a++;
-               b++;
-               len--;
-       }
-}
-
 static bool
 validate_device_path(struct efi_variable *var, int match, u8 *buffer,
                     unsigned long len)
@@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
        u16 filepathlength;
        int i, desclength = 0, namelen;
 
-       namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+       namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
 
        /* Either "Boot" or "Driver" followed by four digits of hex */
        for (i = match; i < match+4; i++) {
@@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
         * There's no stored length for the description, so it has to be
         * found by hand
         */
-       desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+       desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
 
        /* Each boot entry must have a descriptor */
        if (!desclength)
@@ -436,24 +392,12 @@ static efi_status_t
 check_var_size_locked(struct efivars *efivars, u32 attributes,
                        unsigned long size)
 {
-       u64 storage_size, remaining_size, max_size;
-       efi_status_t status;
        const struct efivar_operations *fops = efivars->ops;
 
-       if (!efivars->ops->query_variable_info)
+       if (!efivars->ops->query_variable_store)
                return EFI_UNSUPPORTED;
 
-       status = fops->query_variable_info(attributes, &storage_size,
-                                          &remaining_size, &max_size);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       if (!storage_size || size > remaining_size || size > max_size ||
-           (remaining_size - size) < (storage_size / 2))
-               return EFI_OUT_OF_RESOURCES;
-
-       return status;
+       return fops->query_variable_store(attributes, size);
 }
 
 
@@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        spin_lock_irq(&efivars->lock);
 
        status = check_var_size_locked(efivars, new_var->Attributes,
-              new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+              new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
 
        if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
                status = efivars->ops->set_variable(new_var->VariableName,
@@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,
         * QueryVariableInfo() isn't supported by the firmware.
         */
 
-       varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
+       varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
        status = check_var_size(efivars, attributes, varsize);
 
        if (status != EFI_SUCCESS) {
@@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 
                inode = NULL;
 
-               len = utf16_strlen(entry->var.VariableName);
+               len = ucs2_strlen(entry->var.VariableName);
 
                /* name, plus '-', plus GUID, plus NUL*/
                name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 
                if (efi_guidcmp(entry->var.VendorGuid, vendor))
                        continue;
-               if (utf16_strncmp(entry->var.VariableName, efi_name,
-                                 utf16_strlen(efi_name))) {
+               if (ucs2_strncmp(entry->var.VariableName, efi_name,
+                                 ucs2_strlen(efi_name))) {
                        /*
                         * Check if an old format,
                         * which doesn't support holding
@@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
                        for (i = 0; i < DUMP_NAME_LEN; i++)
                                efi_name_old[i] = name_old[i];
 
-                       if (utf16_strncmp(entry->var.VariableName, efi_name_old,
-                                         utf16_strlen(efi_name_old)))
+                       if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
+                                         ucs2_strlen(efi_name_old)))
                                continue;
                }
 
@@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf16_strsize(new_var->VariableName, 1024);
+               strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(new_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                new_var->VariableName, strsize1) &&
@@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
        }
 
        status = check_var_size_locked(efivars, new_var->Attributes,
-              new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+              new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
 
        if (status && status != EFI_UNSUPPORTED) {
                spin_unlock_irq(&efivars->lock);
@@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 
        /* Create the entry in sysfs.  Locking is not required here */
        status = efivar_create_sysfs_entry(efivars,
-                                          utf16_strsize(new_var->VariableName,
+                                          ucs2_strsize(new_var->VariableName,
                                                         1024),
                                           new_var->VariableName,
                                           &new_var->VendorGuid);
@@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf16_strsize(del_var->VariableName, 1024);
+               strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(del_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                del_var->VariableName, strsize1) &&
@@ -1691,9 +1635,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
        unsigned long strsize1, strsize2;
        bool found = false;
 
-       strsize1 = utf16_strsize(variable_name, 1024);
+       strsize1 = ucs2_strsize(variable_name, 1024);
        list_for_each_entry_safe(entry, n, &efivars->list, list) {
-               strsize2 = utf16_strsize(entry->var.VariableName, 1024);
+               strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(variable_name, &(entry->var.VariableName),
                                strsize2) &&
@@ -2131,7 +2075,7 @@ efivars_init(void)
        ops.get_variable = efi.get_variable;
        ops.set_variable = efi.set_variable;
        ops.get_next_variable = efi.get_next_variable;
-       ops.query_variable_info = efi.query_variable_info;
+       ops.query_variable_store = efi_query_variable_store;
 
        error = register_efivars(&__efivars, &ops, efi_kobj);
        if (error)
index 5d66750138647d619d9521922a3d2c5421db73e4..1a38dd7dfe4e56c9b749cac1e741fe4ad092024c 100644 (file)
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x3c, idle_cpu_hsw),
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
+       ICPU(0x46, idle_cpu_hsw),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
index 1daa97913b7d20068845865a405387404fffe1d8..0bfd8cf252002d4095dc699cf8277931b1ec00a0 100644 (file)
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x802: /* Intuos4 General Pen */
                case 0x804: /* Intuos4 Marker Pen */
                case 0x40802: /* Intuos4 Classic Pen */
-               case 0x18803: /* DTH2242 Grip Pen */
+               case 0x18802: /* DTH2242 Grip Pen */
                case 0x022:
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
        { "Wacom Intuos4 12x19",  WACOM_PKGLEN_INTUOS,    97536, 60960, 2047,
          63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xBC =
-       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40840, 25400, 2047,
+       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40640, 25400, 2047,
          63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x26 =
        { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS,  31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x44) },
        { USB_DEVICE_WACOM(0x45) },
        { USB_DEVICE_WACOM(0x59) },
-       { USB_DEVICE_WACOM(0x5D) },
+       { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xB0) },
        { USB_DEVICE_WACOM(0xB1) },
        { USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
        { USB_DEVICE_WACOM(0xF8) },
-       { USB_DEVICE_WACOM(0xF6) },
+       { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xFA) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index a32e0d5aa45f43eb71c91ab9020696436212ae95..fc6aebf1e4b2630493b8e3ff89a2f76650019ef9 100644 (file)
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);
 
-       return -ENXIO;
+       /* the genirq layer expects 0 if we can't retrigger in hardware */
+       return 0;
 }
 
 #ifdef CONFIG_SMP
index 7e469260fe5eba5272ab56f74d9bd1c38fc2cfbf..9a0bdad9ad8fad49eaf8ee0d0ad916df247f0f3c 100644 (file)
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
+                       trace_block_bio_complete(md->queue, bio, io_error);
                        bio_endio(bio, io_error);
                }
        }
index 24909eb13fec1b0bf22e40caa4ff7a76967f80e4..f4e87bfc7567923944d2fce13da4c7d636479566 100644 (file)
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
                bi = return_bi;
        }
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)
        rdev_dec_pending(rdev, conf->mddev);
 
        if (!error && uptodate) {
+               trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+                                        raid_bi, 0);
                bio_endio(raid_bi, 0);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
                        wake_up(&conf->wait_for_stripe);
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
        }
 }
@@ -4758,8 +4764,11 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                handled++;
        }
        remaining = raid5_dec_bi_active_stripes(raid_bio);
-       if (remaining == 0)
+       if (remaining == 0) {
+               trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+                                        raid_bio, 0);
                bio_endio(raid_bio, 0);
+       }
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;
index 92ab30ab00dcab1d0bc6405a94bbe3aeffcfb128..dc571ebc1aa0be8680f4a20397653e9d4a7f84cf 100644 (file)
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
 }
 #endif
 
-static inline unsigned long get_vm_size(struct vm_area_struct *vma)
-{
-       return vma->vm_end - vma->vm_start;
-}
-
-static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
-{
-       return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
-}
-
-/*
- * Set a new vm offset.
- *
- * Verify that the incoming offset really works as a page offset,
- * and that the offset and size fit in a resource_size_t.
- */
-static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
-{
-       pgoff_t pgoff = off >> PAGE_SHIFT;
-       if (off != (resource_size_t) pgoff << PAGE_SHIFT)
-               return -EINVAL;
-       if (off + get_vm_size(vma) - 1 < off)
-               return -EINVAL;
-       vma->vm_pgoff = pgoff;
-       return 0;
-}
-
 /*
  * set up a mapping for shared memory segments
  */
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        struct map_info *map = mtd->priv;
-       resource_size_t start, off;
-       unsigned long len, vma_len;
 
         /* This is broken because it assumes the MTD device is map-based
           and that mtd->priv is a valid struct map_info.  It should be
           replaced with something that uses the mtd_get_unmapped_area()
           operation properly. */
        if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
-               off = get_vm_offset(vma);
-               start = map->phys;
-               len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
-               start &= PAGE_MASK;
-               vma_len = get_vm_size(vma);
-
-               /* Overflow in off+len? */
-               if (vma_len + off < off)
-                       return -EINVAL;
-               /* Does it fit in the mapping? */
-               if (vma_len + off > len)
-                       return -EINVAL;
-
-               off += start;
-               /* Did that overflow? */
-               if (off < start)
-                       return -EINVAL;
-               if (set_vm_offset(vma, off) < 0)
-                       return -EINVAL;
-               vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
 #ifdef pgprot_noncached
-               if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+               if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
-               if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                                      vma->vm_end - vma->vm_start,
-                                      vma->vm_page_prot))
-                       return -EAGAIN;
-
-               return 0;
+               return vm_iomap_memory(vma, map->phys, map->size);
        }
        return -ENOSYS;
 #else
index 07401a3e256bf3b1c5f56c607d80f757b448105a..dbbea0eec134f1b2f0225161a33111264450f48e 100644 (file)
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
 
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_del(old_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
 
        if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(new_active->dev, 1);
 
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_add(new_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
 }
 
@@ -1901,11 +1905,29 @@ err_dest_symlinks:
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
 err_detach:
+       if (!USES_PRIMARY(bond->params.mode)) {
+               netif_addr_lock_bh(bond_dev);
+               bond_mc_list_flush(bond_dev, slave_dev);
+               netif_addr_unlock_bh(bond_dev);
+       }
+       bond_del_vlans_from_slave(bond, slave_dev);
        write_lock_bh(&bond->lock);
        bond_detach_slave(bond, new_slave);
+       if (bond->primary_slave == new_slave)
+               bond->primary_slave = NULL;
        write_unlock_bh(&bond->lock);
+       if (bond->curr_active_slave == new_slave) {
+               read_lock(&bond->lock);
+               write_lock_bh(&bond->curr_slave_lock);
+               bond_change_active_slave(bond, NULL);
+               bond_select_active_slave(bond);
+               write_unlock_bh(&bond->curr_slave_lock);
+               read_unlock(&bond->lock);
+       }
+       slave_disable_netpoll(new_slave);
 
 err_close:
+       slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_unset_master:
@@ -3168,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,
                                   struct net_device *slave_dev)
 {
        struct slave *slave = bond_slave_get_rtnl(slave_dev);
-       struct bonding *bond = slave->bond;
-       struct net_device *bond_dev = slave->bond->dev;
+       struct bonding *bond;
+       struct net_device *bond_dev;
        u32 old_speed;
        u8 old_duplex;
 
+       /* A netdev event can be generated while enslaving a device
+        * before netdev_rx_handler_register is called in which case
+        * slave will be NULL
+        */
+       if (!slave)
+               return NOTIFY_DONE;
+       bond_dev = slave->bond->dev;
+       bond = slave->bond;
+
        switch (event) {
        case NETDEV_UNREGISTER:
                if (bond->setup_by_slave)
@@ -3286,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
+       const struct ethhdr *data;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
        u32 v6hash;
-       __be32 *s, *d;
+       const __be32 *s, *d;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_network_may_pull(skb, sizeof(*iph))) {
                iph = ip_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
                v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3318,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
        u32 layer4_xor = 0;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
-       __be32 *s, *d;
-       __be16 *layer4hdr;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
+       const __be32 *s, *d;
+       const __be16 *l4 = NULL;
+       __be16 _l4[2];
+       int noff = skb_network_offset(skb);
+       int poff;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_may_pull(skb, noff + sizeof(*iph))) {
                iph = ip_hdr(skb);
-               if (!ip_is_fragment(iph) &&
-                   (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(iph->protocol);
+
+               if (!ip_is_fragment(iph) && poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
-               if ((ipv6h->nexthdr == IPPROTO_TCP ||
-                    ipv6h->nexthdr == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)(ipv6h + 1);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(ipv6h->nexthdr);
+               if (poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
index f32b9fc6a983205151feabc8512e1fac5a7df88a..9aa0c64c33c81c9f296ed5f08b0d3666ee989816 100644 (file)
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net)
        struct mcp251x_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       unsigned long flags;
        int ret;
 
        ret = open_candev(net);
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)
        priv->tx_skb = NULL;
        priv->tx_len = 0;
 
+       flags = IRQF_ONESHOT;
+       if (pdata->irq_flags)
+               flags |= pdata->irq_flags;
+       else
+               flags |= IRQF_TRIGGER_FALLING;
+
        ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                 pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
-                 DEVICE_NAME, priv);
+                                  flags, DEVICE_NAME, priv);
        if (ret) {
                dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
                if (pdata->transceiver_enable)
index 6433b81256cdafd1cfe35fcc1a37d0a378411f59..8e0c4a0019397f61af74d44da1b60ca0c61032d2 100644 (file)
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
        struct net_device *dev;
        struct sja1000_priv *priv;
        struct resource res;
-       const u32 *prop;
-       int err, irq, res_size, prop_size;
+       u32 prop;
+       int err, irq, res_size;
        void __iomem *base;
 
        err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
        priv->read_reg = sja1000_ofp_read_reg;
        priv->write_reg = sja1000_ofp_write_reg;
 
-       prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
-       if (prop && (prop_size ==  sizeof(u32)))
-               priv->can.clock.freq = *prop / 2;
+       err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
+       if (!err)
+               priv->can.clock.freq = prop / 2;
        else
                priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
 
-       prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
-       if (prop && (prop_size == sizeof(u32)))
-               priv->ocr |= *prop & OCR_MODE_MASK;
+       err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
+       if (!err)
+               priv->ocr |= prop & OCR_MODE_MASK;
        else
                priv->ocr |= OCR_MODE_NORMAL; /* default */
 
-       prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
-       if (prop && (prop_size == sizeof(u32)))
-               priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+       err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
+       if (!err)
+               priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
        else
                priv->ocr |= OCR_TX0_PULLDOWN; /* default */
 
-       prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
-       if (prop && (prop_size == sizeof(u32)) && *prop) {
-               u32 divider = priv->can.clock.freq * 2 / *prop;
+       err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
+       if (!err && prop) {
+               u32 divider = priv->can.clock.freq * 2 / prop;
 
                if (divider > 1)
                        priv->cdr |= divider / 2 - 1;
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
                priv->cdr |= CDR_CLK_OFF; /* default */
        }
 
-       prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
-       if (!prop)
+       if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
                priv->cdr |= CDR_CBP; /* default */
 
        priv->irq_flags = IRQF_SHARED;
index cab306a9888ea5a5ac0036058eaa0290968dbbec..e1d26433d61921b3e0460421212006bcb5e0e9ee 100644 (file)
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
        struct ei_device *ei_local;
        struct ax_device *ax;
        struct resource *irq, *mem, *mem2;
-       resource_size_t mem_size, mem2_size = 0;
+       unsigned long mem_size, mem2_size = 0;
        int ret = 0;
 
        dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
index 4046f97378c29fa737d7123347ba1e940bd8e46d..57619dd4a92b0ef08ff0b990ae26db355bb732f3 100644 (file)
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                        }
                }
 
+               /* initialize FW coalescing state machines in RAM */
+               bnx2x_update_coalesce(bp);
+
                /* setup the leading queue */
                rc = bnx2x_setup_leading(bp);
                if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
-       u16 flags = REG_RD16(bp, addr);
+       u8 flags = REG_RD8(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
-       REG_WR16(bp, addr, flags);
+       REG_WR8(bp, addr, flags);
        DP(NETIF_MSG_IFUP,
           "port %x fw_sb_id %d sb_index %d disable %d\n",
           port, fw_sb_id, sb_index, disable);
index 8e58da909f5c1c52a16fb172d364e0eca27e7a91..c50696b396f1cb88196d4f682d61d902d0da9fc6 100644 (file)
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                                REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
                        }
                }
+               if (!CHIP_IS_E1x(bp))
+                       /* block FW from writing to host */
+                       REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
                /* wait until BRB is empty */
                tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
                while (timer_count) {
index 08e54f3d288bc9a2e23faa8bb954dcb4bd8cf584..2886c9b63f9099059d3c8c0fe7a86cddc912854f 100644 (file)
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 
        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               __vlan_put_tag(skb, vlan_tag);
-               skb->vlan_tci = 0;
+               skb = __vlan_put_tag(skb, vlan_tag);
+               if (skb)
+                       skb->vlan_tci = 0;
        }
 
        return skb;
index f292c3aa423fbdeabb83ebdafa3b1699e988b19a..73195f643c9c3b23e45a0fac4008caa919473a25 100644 (file)
@@ -1002,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
        } else {
                if (fep->link) {
                        fec_stop(ndev);
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
index 25151401c2abe54dcec7fe1fe6f48791e0660a64..ab577a763a20d96cb68c151b8f3ceb483af6f8bf 100644 (file)
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
 };
 
-#define ring_uses_build_skb(ring) \
-       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)          \
index 8496adfc6a685580f6ec1c50b86f0fed62b2b121..64f75291e3a5ca5402dfbee6dccb7a68bf1f1dd0 100644 (file)
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-                                 struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-       (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-        (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-       /* set build_skb flag */
-       if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-               set_ring_build_skb_enabled(rx_ring);
-       else
-               clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = adapter->rx_ring[i];
-               igb_set_rx_buffer_len(adapter, rx_ring);
-               igb_configure_rx_ring(adapter, rx_ring);
-       }
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
        return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-                                          union e1000_adv_rx_desc *rx_desc)
-{
-       struct igb_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
-       struct page *page;
-       void *page_addr;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-       unsigned int truesize = IGB_RX_BUFSZ;
-#else
-       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(NET_SKB_PAD +
-                                              NET_IP_ALIGN +
-                                              size);
-#endif
-
-       /* If we spanned a buffer we have a huge mess so test for it */
-       BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
-
-       page_addr = page_address(page) + rx_buffer->page_offset;
-
-       /* prefetch first cache line of first page */
-       prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-       prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-       /* build an skb to around the page buffer */
-       skb = build_skb(page_addr, truesize);
-       if (unlikely(!skb)) {
-               rx_ring->rx_stats.alloc_failed++;
-               return NULL;
-       }
-
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     IGB_RX_BUFSZ,
-                                     DMA_FROM_DEVICE);
-
-       /* update pointers within the skb to store the data */
-       skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-       __skb_put(skb, size);
-
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-               __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
-
-       if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-               /* hand second half of page back to the ring */
-               igb_reuse_rx_page(rx_ring, rx_buffer);
-       } else {
-               /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       }
-
-       /* clear contents of buffer_info */
-       rx_buffer->dma = 0;
-       rx_buffer->page = NULL;
-
-       return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                           union e1000_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                rmb();
 
                /* retrieve a buffer from the ring */
-               if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_rx_buffer(rx_ring, rx_desc);
-               else
-                       skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+               skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
        return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-       if (ring_uses_build_skb(rx_ring))
-               return NET_SKB_PAD + NET_IP_ALIGN;
-       else
-               return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-                                                    bi->page_offset +
-                                                    igb_rx_offset(rx_ring));
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
                rx_desc++;
                bi++;
index d44b4d21268ceafb33cd47c636db2809db2542f8..97e33669c0b9b4c6e4bc105ab5f125dffabd9ee0 100644 (file)
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
                return -EINVAL;
        if (vlan || qos) {
+               if (adapter->vfinfo[vf].pf_vlan)
+                       err = ixgbe_set_vf_vlan(adapter, false,
+                                               adapter->vfinfo[vf].pf_vlan,
+                                               vf);
+               if (err)
+                       goto out;
                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
                if (err)
                        goto out;
index edfba9370922f54632f96440d394772a9da9054b..434e33c527df102adf5052854bc92d816588b4ba 100644 (file)
@@ -33,6 +33,7 @@ config MV643XX_ETH
 
 config MVMDIO
        tristate "Marvell MDIO interface support"
+       select PHYLIB
        ---help---
          This driver supports the MDIO interface found in the network
          interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
 config MVNETA
        tristate "Marvell Armada 370/XP network interface support"
        depends on MACH_ARMADA_370_XP
-       select PHYLIB
        select MVMDIO
        ---help---
          This driver supports the network interface units in the
index 1e628ce572018b30dba025f2c39e3a631112abdf..a47a097c21e13b1373fb6fa896374d8d6ffcd04c 100644 (file)
@@ -374,7 +374,6 @@ static int rxq_number = 8;
 static int txq_number = 8;
 
 static int rxq_def;
-static int txq_def;
 
 #define MVNETA_DRIVER_NAME "mvneta"
 #define MVNETA_DRIVER_VERSION "1.0"
@@ -1475,7 +1474,8 @@ error:
 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+       u16 txq_id = skb_get_queue_mapping(skb);
+       struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
        struct netdev_queue *nq;
        int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                goto out;
 
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_def);
+       nq    = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+       dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
        if (!dev)
                return -ENOMEM;
 
@@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
 module_param(txq_number, int, S_IRUGO);
 
 module_param(rxq_def, int, S_IRUGO);
-module_param(txq_def, int, S_IRUGO);
index cd5ae8813cb3967537bc047a475db69329993736..edd63f1230f3d0076f9bfbdcff146f62a6c2acaa 100644 (file)
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
                }
        } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
+       /* Make sure carrier is off and queue is stopped during loopback */
+       if (netif_running(netdev)) {
+               netif_carrier_off(netdev);
+               netif_stop_queue(netdev);
+       }
+
        ret = qlcnic_do_lb_test(adapter, mode);
 
        qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
 {
        struct qlcnic_cmd_args cmd;
+       struct net_device *netdev = adapter->netdev;
        int ret = 0;
 
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_TX, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+               netdev_err(netdev, "Error getting Tx stats\n");
                goto out;
        }
        /* Get MAC stats */
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_MAC, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Rx stats\n");
+               netdev_err(netdev, "Error getting MAC stats\n");
                goto out;
        }
        /* Get Rx stats */
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_RX, &ret);
        if (ret)
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Tx stats\n");
+               netdev_err(netdev, "Error getting Rx stats\n");
 out:
        qlcnic_free_mbx_args(&cmd);
 }
index 0e630061bff31b41192d4b8fc7978e2be839e274..5fa847fe388a9df3b7f78b44b63f72bffaaeeced 100644 (file)
@@ -358,8 +358,7 @@ set_flags:
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
        opcode = TX_ETHER_PKT;
-       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-           skb_shinfo(skb)->gso_size > 0) {
+       if (skb_is_gso(skb)) {
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                first_desc->total_hdr_length = hdr_len;
index 987fb6f8adc3b34a1089b9e44de928de5fba0119..5ef328af61d0ab9fcbc1c098b323f444fbd63854 100644 (file)
@@ -200,10 +200,10 @@ beacon_err:
        }
 
        err = qlcnic_config_led(adapter, b_state, b_rate);
-       if (!err)
+       if (!err) {
                err = len;
-       else
                ahw->beacon_state = b_state;
+       }
 
        if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
                qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
index a131d7b5d2fe00e1f96997bef8f8afa54186aaa5..7e8d68263963aa5928f5ef1f145ad27624c14ce2 100644 (file)
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.31"
+#define DRV_VERSION    "v1.00.00.32"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
index 6f316ab23257dc7343776d92db05cc471561d681..0780e039b2718d902805414daeb6844b7d2086e7 100644 (file)
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
 
        ecmd->supported = SUPPORTED_10000baseT_Full;
        ecmd->advertising = ADVERTISED_10000baseT_Full;
-       ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
        if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
                                STS_LINK_TYPE_10GBASET) {
                ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
                ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
                ecmd->port = PORT_TP;
+               ecmd->autoneg = AUTONEG_ENABLE;
        } else {
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising |= ADVERTISED_FIBRE;
index b13ab544a7eb56760063474b598163227f27942f..8033555e53c2f6f524211b4d848dcc13a0e14f78 100644 (file)
@@ -1434,11 +1434,13 @@ map_error:
 }
 
 /* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+                                struct rx_ring *rx_ring)
 {
        struct nic_stats *stats = &qdev->nic_stats;
 
        stats->rx_err_count++;
+       rx_ring->rx_errors++;
 
        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
        napi->dev = qdev->ndev;
 
        skb = napi_get_frags(napi);
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        addr = lbq_desc->p.pg_chunk.va;
        prefetch(addr);
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               goto err_out;
+       }
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
        memcpy(skb_put(new_skb, length), skb->data, length);
        skb = new_skb;
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
        /* loopback self test for ethtool */
        if (test_bit(QL_SELFTEST, &qdev->flags)) {
                ql_check_lb_frame(qdev, skb);
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
                return;
        }
 
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
 
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
-       /* Frame error, so drop the packet. */
-       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
-               return (unsigned long)length;
-       }
-
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
                /* The data and headers are split into
                 * separate buffers.
index 0c74a702d4610597da59200be253306c7b4db534..50617c5a0bdb5e63fc930f7af277a1a9fb139af4 100644 (file)
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
        writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
        writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actaully supported).
index 80cad06e5eb21337b111bc32af0376e6da2d7065..4781d3d8e18204ad7fc11d06d102fd2eb6829da4 100644 (file)
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
                if (data->dual_emac) {
-                       if (of_property_read_u32(node, "dual_emac_res_vlan",
+                       if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
                                pr_err("Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
index b7c457adc0dc7439cc2a842409ed3871207f079b..729ed533bb33834fac05825be4b344a755e0e859 100644 (file)
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (tun->flags & TUN_TAP_MQ &&
                    (tun->numqueues + tun->numdisabled > 1))
-                       return err;
+                       return -EBUSY;
        }
        else {
                char *name;
index 16c842997291483eb12306d9ccf0a638772f18f0..6bd91676d2cbb9a46b43108ae1fa994d4fca84f9 100644 (file)
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                goto error;
 
        if (skb) {
-               if (skb->len <= sizeof(ETH_HLEN))
+               if (skb->len <= ETH_HLEN)
                        goto error;
 
                /* mapping VLANs to MBIM sessions:
index 968d5d50751dc120b406f54fffca49b46eafa230..2a3579f679103da7864260f114f98419852eaecd 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
+#include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/cdc.h>
@@ -52,6 +53,96 @@ struct qmi_wwan_state {
        struct usb_interface *data;
 };
 
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices cause them to send raw
+ * IP packets under some circumstances.  There is no way for the
+ * driver/host to know when this will happen.  And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capably of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination.  ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+       __be16 proto;
+
+       /* usbnet rx_complete guarantees that skb->len is at least
+        * hard_header_len, so we can inspect the dest address without
+        * checking skb->len
+        */
+       switch (skb->data[0] & 0xf0) {
+       case 0x40:
+               proto = htons(ETH_P_IP);
+               break;
+       case 0x60:
+               proto = htons(ETH_P_IPV6);
+               break;
+       case 0x00:
+               if (is_multicast_ether_addr(skb->data))
+                       return 1;
+               /* possibly bogus destination - rewrite just in case */
+               skb_reset_mac_header(skb);
+               goto fix_dest;
+       default:
+               /* pass along other packets without modifications */
+               return 1;
+       }
+       if (skb_headroom(skb) < ETH_HLEN)
+               return 0;
+       skb_push(skb, ETH_HLEN);
+       skb_reset_mac_header(skb);
+       eth_hdr(skb)->h_proto = proto;
+       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+fix_dest:
+       memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+       return 1;
+}
+
+/* very simplistic detection of IPv4 or IPv6 headers */
+static bool possibly_iphdr(const char *data)
+{
+       return (data[0] & 0xd0) == 0x40;
+}
+
+/* disallow addresses which may be confused with IP headers */
+static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
+{
+       int ret;
+       struct sockaddr *addr = p;
+
+       ret = eth_prepare_mac_addr_change(dev, p);
+       if (ret < 0)
+               return ret;
+       if (possibly_iphdr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       eth_commit_mac_addr_change(dev, p);
+       return 0;
+}
+
+static const struct net_device_ops qmi_wwan_netdev_ops = {
+       .ndo_open               = usbnet_open,
+       .ndo_stop               = usbnet_stop,
+       .ndo_start_xmit         = usbnet_start_xmit,
+       .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_set_mac_address    = qmi_wwan_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
 /* using a counter to merge subdriver requests with our own into a combined state */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
@@ -229,6 +320,18 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
        }
 
+       /* Never use the same address on both ends of the link, even
+        * if the buggy firmware told us to.
+        */
+       if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+               eth_hw_addr_random(dev->net);
+
+       /* make MAC addr easily distinguishable from an IP header */
+       if (possibly_iphdr(dev->net->dev_addr)) {
+               dev->net->dev_addr[0] |= 0x02;  /* set local assignment bit */
+               dev->net->dev_addr[0] &= 0xbf;  /* clear "IP" bit */
+       }
+       dev->net->netdev_ops = &qmi_wwan_netdev_ops;
 err:
        return status;
 }
@@ -307,6 +410,7 @@ static const struct driver_info     qmi_wwan_info = {
        .bind           = qmi_wwan_bind,
        .unbind         = qmi_wwan_unbind,
        .manage_power   = qmi_wwan_manage_power,
+       .rx_fixup       = qmi_wwan_rx_fixup,
 };
 
 #define HUAWEI_VENDOR_ID       0x12D1
index 28fd99203f6447e4d3545b0fd12b920733ffc10c..bdee2ed67219b6e1c56a0f03a6a28391ba3283aa 100644 (file)
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
        {0x00008258, 0x00000000},
        {0x0000825c, 0x40000000},
        {0x00008260, 0x00080922},
-       {0x00008264, 0x9bc00010},
+       {0x00008264, 0x9d400010},
        {0x00008268, 0xffffffff},
        {0x0000826c, 0x0000ffff},
        {0x00008270, 0x00000000},
index 467b60014b7b73720b25440c584ae9a49c968bcc..73fe8d6db566789ad2c6f34021d607c48a380855 100644 (file)
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
        u32 sz, i;
        struct channel_detector *cd;
 
-       cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+       cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
        if (cd == NULL)
                goto fail;
 
        INIT_LIST_HEAD(&cd->head);
        cd->freq = freq;
        sz = sizeof(cd->detectors) * dpd->num_radar_types;
-       cd->detectors = kzalloc(sz, GFP_KERNEL);
+       cd->detectors = kzalloc(sz, GFP_ATOMIC);
        if (cd->detectors == NULL)
                goto fail;
 
index 91b8dceeadb10ef6c9f84d20f4a5bba6cd1500dd..5e48c5515b8ccc0b59fac41844fd4a0a7c41883e 100644 (file)
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
 {
        struct pulse_elem *p = pool_get_pulse_elem();
        if (p == NULL) {
-               p = kmalloc(sizeof(*p), GFP_KERNEL);
+               p = kmalloc(sizeof(*p), GFP_ATOMIC);
                if (p == NULL) {
                        DFS_POOL_STAT_INC(pulse_alloc_error);
                        return false;
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
                ps.deadline_ts = ps.first_ts + ps.dur;
                new_ps = pool_get_pseq_elem();
                if (new_ps == NULL) {
-                       new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
+                       new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
                        if (new_ps == NULL) {
                                DFS_POOL_STAT_INC(pseq_alloc_error);
                                return false;
index 716058b675571b32c6693656dafeb567868b25cb..a47f5e05fc04835d6b747b8ba4553c91d6614178 100644 (file)
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
         * required version.
         */
        if (priv->fw_version_major != MAJOR_VERSION_REQ ||
-           priv->fw_version_minor != MINOR_VERSION_REQ) {
+           priv->fw_version_minor < MINOR_VERSION_REQ) {
                dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
                        MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
                return -EINVAL;
index e8486c1e091af2f2db2a23827dc612315398751b..b70f220bc4b378e626a59752fca5bce965456340 100644 (file)
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
 #endif
 #ifdef CONFIG_B43_SSB
        case B43_BUS_SSB:
-               /* FIXME */
+               ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
+                                           avoid);
                break;
 #endif
        }
index ec46ffff54092b2a351720e214fb4d008c508947..78da3eff75e8a1c5c126a60aab7bc90d7663a314 100644 (file)
@@ -4126,10 +4126,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
                         BIT(NL80211_IFTYPE_ADHOC) |
                         BIT(NL80211_IFTYPE_AP)
        },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-       },
        {
                .max = 1,
                .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4187,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
                                 BIT(NL80211_IFTYPE_ADHOC) |
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-                                BIT(NL80211_IFTYPE_P2P_GO) |
-                                BIT(NL80211_IFTYPE_P2P_DEVICE);
+                                BIT(NL80211_IFTYPE_P2P_GO);
        wiphy->iface_combinations = brcmf_iface_combos;
        wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
        wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
index c6451c61407a8c4510c97056bde23ef1ee88c060..e2340b231aa163cfba03f8b8f90c0822003a385d 100644 (file)
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
        }
 }
 
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can both be called locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+       struct brcms_timer *t, *next;
+
+       /* free ucode data */
+       if (wl->fw.fw_cnt)
+               brcms_ucode_data_free(&wl->ucode);
+       if (wl->irq)
+               free_irq(wl->irq, wl);
+
+       /* kill dpc */
+       tasklet_kill(&wl->tasklet);
+
+       if (wl->pub) {
+               brcms_debugfs_detach(wl->pub);
+               brcms_c_module_unregister(wl->pub, "linux", wl);
+       }
+
+       /* free common resources */
+       if (wl->wlc) {
+               brcms_c_detach(wl->wlc);
+               wl->wlc = NULL;
+               wl->pub = NULL;
+       }
+
+       /* virtual interface deletion is deferred so we cannot spinwait */
+
+       /* wait for all pending callbacks to complete */
+       while (atomic_read(&wl->callbacks) > 0)
+               schedule();
+
+       /* free timers */
+       for (t = wl->timers; t; t = next) {
+               next = t->next;
+#ifdef DEBUG
+               kfree(t->name);
+#endif
+               kfree(t);
+       }
+}
+
+/*
+* called from both kernel as from this kernel module (error flow on attach)
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct bcma_device *pdev)
+{
+       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+       struct brcms_info *wl = hw->priv;
+
+       if (wl->wlc) {
+               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+               ieee80211_unregister_hw(hw);
+       }
+
+       brcms_free(wl);
+
+       bcma_set_drvdata(pdev, NULL);
+       ieee80211_free_hw(hw);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+       int i;
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               release_firmware(wl->fw.fw_bin[i]);
+               release_firmware(wl->fw.fw_hdr[i]);
+       }
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+{
+       int status;
+       struct device *device = &pdev->dev;
+       char fw_name[100];
+       int i;
+
+       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               if (brcms_firmwares[i] == NULL)
+                       break;
+               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               wl->fw.hdr_num_entries[i] =
+                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+       }
+       wl->fw.fw_cnt = i;
+       status = brcms_ucode_data_init(wl, &wl->ucode);
+       brcms_release_fw(wl);
+       return status;
+}
+
 static void brcms_ops_tx(struct ieee80211_hw *hw,
                         struct ieee80211_tx_control *control,
                         struct sk_buff *skb)
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (!blocked)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
+       if (!wl->ucode.bcm43xx_bomminor) {
+               err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+               if (err) {
+                       brcms_remove(wl->wlc->hw->d11core);
+                       return -ENOENT;
+               }
+       }
+
        spin_lock_bh(&wl->lock);
        /* avoid acknowledging frames before a non-monitor device is added */
        wl->mute_tx = true;
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data)
        wake_up(&wl->tx_flush_wq);
 }
 
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
-{
-       int status;
-       struct device *device = &pdev->dev;
-       char fw_name[100];
-       int i;
-
-       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               if (brcms_firmwares[i] == NULL)
-                       break;
-               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               wl->fw.hdr_num_entries[i] =
-                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
-       }
-       wl->fw.fw_cnt = i;
-       return brcms_ucode_data_init(wl, &wl->ucode);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
-       int i;
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               release_firmware(wl->fw.fw_bin[i]);
-               release_firmware(wl->fw.fw_hdr[i]);
-       }
-}
-
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
-       struct brcms_timer *t, *next;
-
-       /* free ucode data */
-       if (wl->fw.fw_cnt)
-               brcms_ucode_data_free(&wl->ucode);
-       if (wl->irq)
-               free_irq(wl->irq, wl);
-
-       /* kill dpc */
-       tasklet_kill(&wl->tasklet);
-
-       if (wl->pub) {
-               brcms_debugfs_detach(wl->pub);
-               brcms_c_module_unregister(wl->pub, "linux", wl);
-       }
-
-       /* free common resources */
-       if (wl->wlc) {
-               brcms_c_detach(wl->wlc);
-               wl->wlc = NULL;
-               wl->pub = NULL;
-       }
-
-       /* virtual interface deletion is deferred so we cannot spinwait */
-
-       /* wait for all pending callbacks to complete */
-       while (atomic_read(&wl->callbacks) > 0)
-               schedule();
-
-       /* free timers */
-       for (t = wl->timers; t; t = next) {
-               next = t->next;
-#ifdef DEBUG
-               kfree(t->name);
-#endif
-               kfree(t);
-       }
-}
-
-/*
-* called from both kernel as from this kernel module (error flow on attach)
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct bcma_device *pdev)
-{
-       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
-       struct brcms_info *wl = hw->priv;
-
-       if (wl->wlc) {
-               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
-               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
-               ieee80211_unregister_hw(hw);
-       }
-
-       brcms_free(wl);
-
-       bcma_set_drvdata(pdev, NULL);
-       ieee80211_free_hw(hw);
-}
-
 static irqreturn_t brcms_isr(int irq, void *dev_id)
 {
        struct brcms_info *wl;
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
        spin_lock_init(&wl->lock);
        spin_lock_init(&wl->isr_lock);
 
-       /* prepare ucode */
-       if (brcms_request_fw(wl, pdev) < 0) {
-               wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
-                         "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
-               brcms_release_fw(wl);
-               brcms_remove(pdev);
-               return NULL;
-       }
-
        /* common load-time initialization */
        wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
-       brcms_release_fw(wl);
        if (!wl->wlc) {
                wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
                          KBUILD_MODNAME, err);
index 45cacf79f3a73b7dfd97fa8293458febc02b7da9..1a779bbfb87d2b8de61e964734981f4f9a6c891e 100644 (file)
@@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = {
        { KE_KEY, 0x2142, { KEY_MEDIA } },
        { KE_KEY, 0x213b, { KEY_INFO } },
        { KE_KEY, 0x2169, { KEY_DIRECTION } },
-       { KE_KEY, 0x216a, { KEY_SETUP } },
        { KE_KEY, 0x231b, { KEY_HELP } },
        { KE_END, 0 }
 };
@@ -925,9 +924,6 @@ static int __init hp_wmi_init(void)
                err = hp_wmi_input_setup();
                if (err)
                        return err;
-               
-               //Enable magic for hotkeys that run on the SMBus
-               ec_write(0xe6,0x6e);
        }
 
        if (bios_capable) {
index 9a907567f41edf495f1c102d906ba63660de082e..edec135b1685d0525149bc21c724833398424cb1 100644 (file)
@@ -1964,9 +1964,6 @@ struct tp_nvram_state {
 /* kthread for the hotkey poller */
 static struct task_struct *tpacpi_hotkey_task;
 
-/* Acquired while the poller kthread is running, use to sync start/stop */
-static struct mutex hotkey_thread_mutex;
-
 /*
  * Acquire mutex to write poller control variables as an
  * atomic block.
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data)
        unsigned int poll_freq;
        bool was_frozen;
 
-       mutex_lock(&hotkey_thread_mutex);
-
        if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
                goto exit;
 
@@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data)
        }
 
 exit:
-       mutex_unlock(&hotkey_thread_mutex);
        return 0;
 }
 
@@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void)
        if (tpacpi_hotkey_task) {
                kthread_stop(tpacpi_hotkey_task);
                tpacpi_hotkey_task = NULL;
-               mutex_lock(&hotkey_thread_mutex);
-               /* at this point, the thread did exit */
-               mutex_unlock(&hotkey_thread_mutex);
        }
 }
 
@@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        mutex_init(&hotkey_mutex);
 
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-       mutex_init(&hotkey_thread_mutex);
        mutex_init(&hotkey_thread_data_mutex);
 #endif
 
index 1a9d1e3ce64cc94aa36581fd29cefb6084f3502f..c1441ed282eb911ff67a6363ce5a78f6cbe45199 100644 (file)
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
+static void reset_one_i2c(struct bbc_i2c_bus *bp)
 {
        writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);
        writeb(bp->own, bp->i2c_control_regs + 0x1);
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
        writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
 }
 
-static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
+static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)
 {
        struct bbc_i2c_bus *bp;
        struct device_node *dp;
index 4c0f6d883dd3bf689e1900d47285000f39734d5e..7b0bce9367626ad987eb900528a7b329a2f32261 100644 (file)
@@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
                return 0;
        }
 }
+
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
+{
+       u32 pmu_ctl = 0;
+
+       switch (cc->dev->bus->chip_id) {
+       case 0x4322:
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
+               if (spuravoid == 1)
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
+               else
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
+               pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
+               break;
+       case 43222:
+               /* TODO: BCM43222 requires updating PLLs too */
+               return;
+       default:
+               ssb_printk(KERN_ERR PFX
+                          "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+                          cc->dev->bus->chip_id);
+               return;
+       }
+
+       chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
+}
+EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
index 7c254084b6a044da4658071a864e9dabbfdd16dc..86291dcd964a96088938b91fdaa79f6683104e8b 100644 (file)
@@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
        struct fb_info *info = file_fb_info(file);
        struct fb_ops *fb;
-       unsigned long off;
+       unsigned long mmio_pgoff;
        unsigned long start;
        u32 len;
 
        if (!info)
                return -ENODEV;
-       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-               return -EINVAL;
-       off = vma->vm_pgoff << PAGE_SHIFT;
        fb = info->fbops;
        if (!fb)
                return -ENODEV;
@@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
                return res;
        }
 
-       /* frame buffer memory */
+       /*
+        * Ugh. This can be either the frame buffer mapping, or
+        * if pgoff points past it, the mmio mapping.
+        */
        start = info->fix.smem_start;
-       len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
-       if (off >= len) {
-               /* memory mapped io */
-               off -= len;
-               if (info->var.accel_flags) {
-                       mutex_unlock(&info->mm_lock);
-                       return -EINVAL;
-               }
+       len = info->fix.smem_len;
+       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+       if (vma->vm_pgoff >= mmio_pgoff) {
+               vma->vm_pgoff -= mmio_pgoff;
                start = info->fix.mmio_start;
-               len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
+               len = info->fix.mmio_len;
        }
        mutex_unlock(&info->mm_lock);
-       start &= PAGE_MASK;
-       if ((vma->vm_end - vma->vm_start + off) > len)
-               return -EINVAL;
-       off += start;
-       vma->vm_pgoff = off >> PAGE_SHIFT;
-       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
+
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       fb_pgprotect(file, vma, off);
-       if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                            vma->vm_end - vma->vm_start, vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
+       fb_pgprotect(file, vma, start);
+
+       return vm_iomap_memory(vma, start, len);
 }
 
 static int
index 9ed83419038bbdd94da43f0d988732fcaefec188..84de2632857a335086b829597392e2a7e456cc54 100644 (file)
@@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path)
 
        kfree(path);
        mutex_unlock(&disp_lock);
-
-       dev_info(path->dev, "de-register %s\n", path->name);
 }
 EXPORT_SYMBOL_GPL(mmp_unregister_path);
index 3939829f6c5cc67e7d0ca1e9518f1df834be5088..86af964c2425d7077467bf54eaecde3d2d4c4911 100644 (file)
@@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
+               return 0;
        }
 
        /* Do not dump I/O mapped devices or special mappings */
index bb5768f59b32e22fdcde250f0ae8b01eb6b69b48..b96fc6ce485595f0179bc909c807ae197258e671 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error)
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
-       trace_block_bio_complete(bio, error);
-
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
index a96a4885bbbfa130750caa70b9b3f504ea10b244..87e731f020fbe2493cba43df1a275201793c6942 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : 0);
+                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * for the others its just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : 0);
+                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, new_end, old_end);
 
index a94f0f779d5e3338281e3a1f5a0aaee6b16a01bc..fe0a76213d9eb6b39e291c415ccc4bf7af3a06e8 100644 (file)
@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
-               u32 size = inode->i_size;
+               loff_t size = inode->i_size;
 
                res = pagecache_write_begin(NULL, mapping, size, 0,
                                                AOP_FLAG_UNINTERRUPTIBLE,
index 84e3d856e91d05c27aa182be0d28dead99128c93..523464e62849ccea755104d19e7ce39c2ca5fbfd 100644 (file)
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
-       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
index f7ed9ee46eb9d3818d2210c100731e5eda6ee35e..cbd0f1b324b972b96f036139fcd96bf53a03fb01 100644 (file)
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
        "x (dead)",             /*  64 */
        "K (wakekill)",         /* 128 */
        "W (waking)",           /* 256 */
+       "P (parked)",           /* 512 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
index bfd87685fc1fd1fa32ea231c563205ed6f448a14..a59ff51b016695f54095e753cbfc2a5a6b684684 100644 (file)
@@ -7,6 +7,16 @@
 #include <linux/mm_types.h>
 #include <linux/bug.h>
 
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE.  However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING  0UL
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
index 0ea61e07a91c03053833faf3a4250e1f6023a10d..7c2e030e72f10542612ba34ce79b18f6b1b8ad06 100644 (file)
@@ -12,7 +12,6 @@
 
 struct blk_trace {
        int trace_state;
-       bool rq_based;
        struct rchan *rchan;
        unsigned long __percpu *sequence;
        unsigned char __percpu *msg_data;
index 9bf2f1fcae27789e54336f053fecabe4655b1fbd..3d7df3d32c662f20a2a7c1b661c9d89f5f4b9dd3 100644 (file)
@@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
                                              unsigned long count,
                                              u64 *max_size,
                                              int *reset_type);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
 
 /*
  *  EFI Configuration Table and GUID definitions
@@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
 #ifdef CONFIG_X86
 extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
+extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
 #else
 static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
+
+static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+       return EFI_SUCCESS;
+}
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern u64 efi_get_iobase (void);
@@ -731,7 +738,7 @@ struct efivar_operations {
        efi_get_variable_t *get_variable;
        efi_get_next_variable_t *get_next_variable;
        efi_set_variable_t *set_variable;
-       efi_query_variable_info_t *query_variable_info;
+       efi_query_variable_store_t *query_variable_store;
 };
 
 struct efivars {
index d2e6927bbaae1eb65b5b1dd2575643a5562e329a..d78d28a733b15afdc25a620ca5925f6619f82a2c 100644 (file)
@@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size;
 
 int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
                unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+               unsigned long long *crash_size, unsigned long long *crash_base);
 int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
                unsigned long long *crash_size, unsigned long long *crash_base);
 int crash_shrink_memory(unsigned long new_size);
index e19ff30ad0a21453cece0df7524d0b336a69078f..e2091b88d24c972be51d80e1f110ad03c50d8902 100644 (file)
@@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
 
 struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int foll_flags,
index 01d25e6fc792472692f6e601de77a7b83fe69734..0214c4c146faab0dad0a339dcd098124245dbe37 100644 (file)
@@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set)
 #define type_pf_data_tlist     TOKEN(TYPE, PF, _data_tlist)
 #define type_pf_data_next      TOKEN(TYPE, PF, _data_next)
 #define type_pf_data_flags     TOKEN(TYPE, PF, _data_flags)
+#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)
 #ifdef IP_SET_HASH_WITH_NETS
 #define type_pf_data_match     TOKEN(TYPE, PF, _data_match)
 #else
@@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried)
        struct ip_set_hash *h = set->data;
        struct htable *t, *orig = h->table;
        u8 htable_bits = orig->htable_bits;
-       const struct type_pf_elem *data;
+       struct type_pf_elem *data;
        struct hbucket *n, *m;
-       u32 i, j;
+       u32 i, j, flags = 0;
        int ret;
 
 retry:
@@ -412,9 +413,16 @@ retry:
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_data(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                       flags = 0;
+                       type_pf_data_reset_flags(data, &flags);
+#endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
+                       ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);
                        if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+                               type_pf_data_flags(data, flags);
+#endif
                                read_unlock_bh(&set->lock);
                                ahash_destroy(t);
                                if (ret == -EAGAIN)
@@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried)
        struct ip_set_hash *h = set->data;
        struct htable *t, *orig = h->table;
        u8 htable_bits = orig->htable_bits;
-       const struct type_pf_elem *data;
+       struct type_pf_elem *data;
        struct hbucket *n, *m;
-       u32 i, j;
+       u32 i, j, flags = 0;
        int ret;
 
        /* Try to cleanup once */
@@ -873,10 +881,17 @@ retry:
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                       flags = 0;
+                       type_pf_data_reset_flags(data, &flags);
+#endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
-                                               ip_set_timeout_get(type_pf_data_timeout(data)));
+                       ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags,
+                               ip_set_timeout_get(type_pf_data_timeout(data)));
                        if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+                               type_pf_data_flags(data, flags);
+#endif
                                read_unlock_bh(&set->lock);
                                ahash_destroy(t);
                                if (ret == -EAGAIN)
@@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set)
 #undef type_pf_data_tlist
 #undef type_pf_data_next
 #undef type_pf_data_flags
+#undef type_pf_data_reset_flags
 #undef type_pf_data_match
 
 #undef type_pf_elem
index d35d2b6ddbfb69f098b1660fe280b4d2da8a5ffe..e692a022527bdaaace8b388268b0c280946fc5c2 100644 (file)
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD              64
 #define TASK_WAKEKILL          128
 #define TASK_WAKING            256
-#define TASK_STATE_MAX         512
+#define TASK_PARKED            512
+#define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
index 9e492be5244b40d8ead3340aa7ac475cb51e56ff..6fcfe99bd999d295e0eb8efb967d0b2892335afc 100644 (file)
 #define SSB_CHIPCO_PMU_CTL                     0x0600 /* PMU control */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV            0xFFFF0000 /* ILP div mask */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT      16
+#define  SSB_CHIPCO_PMU_CTL_PLL_UPD            0x00000400
 #define  SSB_CHIPCO_PMU_CTL_NOILPONW           0x00000200 /* No ILP on wait */
 #define  SSB_CHIPCO_PMU_CTL_HTREQEN            0x00000100 /* HT req enable */
 #define  SSB_CHIPCO_PMU_CTL_ALPREQEN           0x00000080 /* ALP req enable */
@@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id {
 void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
                             enum ssb_pmu_ldo_volt_id id, u32 voltage);
 void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
 
 #endif /* LINUX_SSB_CHIPCO_H_ */
index 2de42f9401d2599d4309028297c29a3bff9222c8..a5ffd32642fd6d46a8bad7e64eb2b791b9c4cdab 100644 (file)
@@ -25,6 +25,7 @@ extern int swiotlb_force;
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
+unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 
 /*
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
new file mode 100644 (file)
index 0000000..cbb20af
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _LINUX_UCS2_STRING_H_
+#define _LINUX_UCS2_STRING_H_
+
+#include <linux/types.h>       /* for size_t */
+#include <linux/stddef.h>      /* for NULL */
+
+typedef u16 ucs2_char_t;
+
+unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
+unsigned long ucs2_strlen(const ucs2_char_t *s);
+unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+
+#endif /* _LINUX_UCS2_STRING_H_ */
index 40be2a0d8ae1bef7270b06a8a04cc9b9ab30b630..84a6440f1f19ee698dba94a7f899d77fc527e432 100644 (file)
@@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 /* Device notifier */
 extern int register_inet6addr_notifier(struct notifier_block *nb);
 extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
 extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
                                         struct ipv6_devconf *devconf);
index f74109144d3fe544da2ff7ccf29f99884c22b32f..f132924cc9daec92b50427f705b88c3c350e70a3 100644 (file)
@@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
        return (self && self->lap) ? self->lap->daddr : 0;
 }
 
-extern const char *irlmp_reasons[];
+const char *irlmp_reason_str(LM_REASON reason);
+
 extern int sysctl_discovery_timeout;
 extern int sysctl_discovery_slots;
 extern int sysctl_discovery;
index 975cca01048bee3b7da9017725654053921250ae..b11708105681e04f5c24f7ed206f732e2c37dc02 100644 (file)
@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
        scm->pid  = get_pid(pid);
        scm->cred = cred ? get_cred(cred) : NULL;
        scm->creds.pid = pid_vnr(pid);
-       scm->creds.uid = cred ? cred->euid : INVALID_UID;
-       scm->creds.gid = cred ? cred->egid : INVALID_GID;
+       scm->creds.uid = cred ? cred->uid : INVALID_UID;
+       scm->creds.gid = cred ? cred->gid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
index 9961726523d01251957b2f8c661422c31d1ac78c..9c1467357b03c616967cd193efab6506e3e5adff 100644 (file)
@@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-       TP_PROTO(struct bio *bio, int error),
+       TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-       TP_ARGS(bio, error),
+       TP_ARGS(q, bio, error),
 
        TP_STRUCT__entry(
                __field( dev_t,         dev             )
@@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev ?
-                                         bio->bi_bdev->bd_dev : 0;
+               __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                __entry->error          = error;
index 5a8671e8a67ff8fa8f715311300901181555d78e..e5586caff67a973c962a3ed6bf43d4cc01664083 100644 (file)
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "W" }) : "R",
+                               { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
index 4c43b44487922301a3c7949ee6c07020e50e248e..706d035fa7488df73b142b1812c2c3feaa8708d3 100644 (file)
 #ifndef _LINUX_FUSE_H
 #define _LINUX_FUSE_H
 
-#ifdef __linux__
+#ifdef __KERNEL__
 #include <linux/types.h>
 #else
 #include <stdint.h>
-#define __u64 uint64_t
-#define __s64 int64_t
-#define __u32 uint32_t
-#define __s32 int32_t
-#define __u16 uint16_t
 #endif
 
 /*
    userspace works under 64bit kernels */
 
 struct fuse_attr {
-       __u64   ino;
-       __u64   size;
-       __u64   blocks;
-       __u64   atime;
-       __u64   mtime;
-       __u64   ctime;
-       __u32   atimensec;
-       __u32   mtimensec;
-       __u32   ctimensec;
-       __u32   mode;
-       __u32   nlink;
-       __u32   uid;
-       __u32   gid;
-       __u32   rdev;
-       __u32   blksize;
-       __u32   padding;
+       uint64_t        ino;
+       uint64_t        size;
+       uint64_t        blocks;
+       uint64_t        atime;
+       uint64_t        mtime;
+       uint64_t        ctime;
+       uint32_t        atimensec;
+       uint32_t        mtimensec;
+       uint32_t        ctimensec;
+       uint32_t        mode;
+       uint32_t        nlink;
+       uint32_t        uid;
+       uint32_t        gid;
+       uint32_t        rdev;
+       uint32_t        blksize;
+       uint32_t        padding;
 };
 
 struct fuse_kstatfs {
-       __u64   blocks;
-       __u64   bfree;
-       __u64   bavail;
-       __u64   files;
-       __u64   ffree;
-       __u32   bsize;
-       __u32   namelen;
-       __u32   frsize;
-       __u32   padding;
-       __u32   spare[6];
+       uint64_t        blocks;
+       uint64_t        bfree;
+       uint64_t        bavail;
+       uint64_t        files;
+       uint64_t        ffree;
+       uint32_t        bsize;
+       uint32_t        namelen;
+       uint32_t        frsize;
+       uint32_t        padding;
+       uint32_t        spare[6];
 };
 
 struct fuse_file_lock {
-       __u64   start;
-       __u64   end;
-       __u32   type;
-       __u32   pid; /* tgid */
+       uint64_t        start;
+       uint64_t        end;
+       uint32_t        type;
+       uint32_t        pid; /* tgid */
 };
 
 /**
@@ -364,143 +359,143 @@ enum fuse_notify_code {
 #define FUSE_COMPAT_ENTRY_OUT_SIZE 120
 
 struct fuse_entry_out {
-       __u64   nodeid;         /* Inode ID */
-       __u64   generation;     /* Inode generation: nodeid:gen must
-                                  be unique for the fs's lifetime */
-       __u64   entry_valid;    /* Cache timeout for the name */
-       __u64   attr_valid;     /* Cache timeout for the attributes */
-       __u32   entry_valid_nsec;
-       __u32   attr_valid_nsec;
+       uint64_t        nodeid;         /* Inode ID */
+       uint64_t        generation;     /* Inode generation: nodeid:gen must
+                                          be unique for the fs's lifetime */
+       uint64_t        entry_valid;    /* Cache timeout for the name */
+       uint64_t        attr_valid;     /* Cache timeout for the attributes */
+       uint32_t        entry_valid_nsec;
+       uint32_t        attr_valid_nsec;
        struct fuse_attr attr;
 };
 
 struct fuse_forget_in {
-       __u64   nlookup;
+       uint64_t        nlookup;
 };
 
 struct fuse_forget_one {
-       __u64   nodeid;
-       __u64   nlookup;
+       uint64_t        nodeid;
+       uint64_t        nlookup;
 };
 
 struct fuse_batch_forget_in {
-       __u32   count;
-       __u32   dummy;
+       uint32_t        count;
+       uint32_t        dummy;
 };
 
 struct fuse_getattr_in {
-       __u32   getattr_flags;
-       __u32   dummy;
-       __u64   fh;
+       uint32_t        getattr_flags;
+       uint32_t        dummy;
+       uint64_t        fh;
 };
 
 #define FUSE_COMPAT_ATTR_OUT_SIZE 96
 
 struct fuse_attr_out {
-       __u64   attr_valid;     /* Cache timeout for the attributes */
-       __u32   attr_valid_nsec;
-       __u32   dummy;
+       uint64_t        attr_valid;     /* Cache timeout for the attributes */
+       uint32_t        attr_valid_nsec;
+       uint32_t        dummy;
        struct fuse_attr attr;
 };
 
 #define FUSE_COMPAT_MKNOD_IN_SIZE 8
 
 struct fuse_mknod_in {
-       __u32   mode;
-       __u32   rdev;
-       __u32   umask;
-       __u32   padding;
+       uint32_t        mode;
+       uint32_t        rdev;
+       uint32_t        umask;
+       uint32_t        padding;
 };
 
 struct fuse_mkdir_in {
-       __u32   mode;
-       __u32   umask;
+       uint32_t        mode;
+       uint32_t        umask;
 };
 
 struct fuse_rename_in {
-       __u64   newdir;
+       uint64_t        newdir;
 };
 
 struct fuse_link_in {
-       __u64   oldnodeid;
+       uint64_t        oldnodeid;
 };
 
 struct fuse_setattr_in {
-       __u32   valid;
-       __u32   padding;
-       __u64   fh;
-       __u64   size;
-       __u64   lock_owner;
-       __u64   atime;
-       __u64   mtime;
-       __u64   unused2;
-       __u32   atimensec;
-       __u32   mtimensec;
-       __u32   unused3;
-       __u32   mode;
-       __u32   unused4;
-       __u32   uid;
-       __u32   gid;
-       __u32   unused5;
+       uint32_t        valid;
+       uint32_t        padding;
+       uint64_t        fh;
+       uint64_t        size;
+       uint64_t        lock_owner;
+       uint64_t        atime;
+       uint64_t        mtime;
+       uint64_t        unused2;
+       uint32_t        atimensec;
+       uint32_t        mtimensec;
+       uint32_t        unused3;
+       uint32_t        mode;
+       uint32_t        unused4;
+       uint32_t        uid;
+       uint32_t        gid;
+       uint32_t        unused5;
 };
 
 struct fuse_open_in {
-       __u32   flags;
-       __u32   unused;
+       uint32_t        flags;
+       uint32_t        unused;
 };
 
 struct fuse_create_in {
-       __u32   flags;
-       __u32   mode;
-       __u32   umask;
-       __u32   padding;
+       uint32_t        flags;
+       uint32_t        mode;
+       uint32_t        umask;
+       uint32_t        padding;
 };
 
 struct fuse_open_out {
-       __u64   fh;
-       __u32   open_flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint32_t        open_flags;
+       uint32_t        padding;
 };
 
 struct fuse_release_in {
-       __u64   fh;
-       __u32   flags;
-       __u32   release_flags;
-       __u64   lock_owner;
+       uint64_t        fh;
+       uint32_t        flags;
+       uint32_t        release_flags;
+       uint64_t        lock_owner;
 };
 
 struct fuse_flush_in {
-       __u64   fh;
-       __u32   unused;
-       __u32   padding;
-       __u64   lock_owner;
+       uint64_t        fh;
+       uint32_t        unused;
+       uint32_t        padding;
+       uint64_t        lock_owner;
 };
 
 struct fuse_read_in {
-       __u64   fh;
-       __u64   offset;
-       __u32   size;
-       __u32   read_flags;
-       __u64   lock_owner;
-       __u32   flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        read_flags;
+       uint64_t        lock_owner;
+       uint32_t        flags;
+       uint32_t        padding;
 };
 
 #define FUSE_COMPAT_WRITE_IN_SIZE 24
 
 struct fuse_write_in {
-       __u64   fh;
-       __u64   offset;
-       __u32   size;
-       __u32   write_flags;
-       __u64   lock_owner;
-       __u32   flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        write_flags;
+       uint64_t        lock_owner;
+       uint32_t        flags;
+       uint32_t        padding;
 };
 
 struct fuse_write_out {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 #define FUSE_COMPAT_STATFS_SIZE 48
@@ -510,32 +505,32 @@ struct fuse_statfs_out {
 };
 
 struct fuse_fsync_in {
-       __u64   fh;
-       __u32   fsync_flags;
-       __u32   padding;
+       uint64_t        fh;
+       uint32_t        fsync_flags;
+       uint32_t        padding;
 };
 
 struct fuse_setxattr_in {
-       __u32   size;
-       __u32   flags;
+       uint32_t        size;
+       uint32_t        flags;
 };
 
 struct fuse_getxattr_in {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 struct fuse_getxattr_out {
-       __u32   size;
-       __u32   padding;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 struct fuse_lk_in {
-       __u64   fh;
-       __u64   owner;
+       uint64_t        fh;
+       uint64_t        owner;
        struct fuse_file_lock lk;
-       __u32   lk_flags;
-       __u32   padding;
+       uint32_t        lk_flags;
+       uint32_t        padding;
 };
 
 struct fuse_lk_out {
@@ -543,134 +538,135 @@ struct fuse_lk_out {
 };
 
 struct fuse_access_in {
-       __u32   mask;
-       __u32   padding;
+       uint32_t        mask;
+       uint32_t        padding;
 };
 
 struct fuse_init_in {
-       __u32   major;
-       __u32   minor;
-       __u32   max_readahead;
-       __u32   flags;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        max_readahead;
+       uint32_t        flags;
 };
 
 struct fuse_init_out {
-       __u32   major;
-       __u32   minor;
-       __u32   max_readahead;
-       __u32   flags;
-       __u16   max_background;
-       __u16   congestion_threshold;
-       __u32   max_write;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        max_readahead;
+       uint32_t        flags;
+       uint16_t        max_background;
+       uint16_t        congestion_threshold;
+       uint32_t        max_write;
 };
 
 #define CUSE_INIT_INFO_MAX 4096
 
 struct cuse_init_in {
-       __u32   major;
-       __u32   minor;
-       __u32   unused;
-       __u32   flags;
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        unused;
+       uint32_t        flags;
 };
 
 struct cuse_init_out {
-       __u32   major;
-       __u32   minor;
-       __u32   unused;
-       __u32   flags;
-       __u32   max_read;
-       __u32   max_write;
-       __u32   dev_major;              /* chardev major */
-       __u32   dev_minor;              /* chardev minor */
-       __u32   spare[10];
+       uint32_t        major;
+       uint32_t        minor;
+       uint32_t        unused;
+       uint32_t        flags;
+       uint32_t        max_read;
+       uint32_t        max_write;
+       uint32_t        dev_major;              /* chardev major */
+       uint32_t        dev_minor;              /* chardev minor */
+       uint32_t        spare[10];
 };
 
 struct fuse_interrupt_in {
-       __u64   unique;
+       uint64_t        unique;
 };
 
 struct fuse_bmap_in {
-       __u64   block;
-       __u32   blocksize;
-       __u32   padding;
+       uint64_t        block;
+       uint32_t        blocksize;
+       uint32_t        padding;
 };
 
 struct fuse_bmap_out {
-       __u64   block;
+       uint64_t        block;
 };
 
 struct fuse_ioctl_in {
-       __u64   fh;
-       __u32   flags;
-       __u32   cmd;
-       __u64   arg;
-       __u32   in_size;
-       __u32   out_size;
+       uint64_t        fh;
+       uint32_t        flags;
+       uint32_t        cmd;
+       uint64_t        arg;
+       uint32_t        in_size;
+       uint32_t        out_size;
 };
 
 struct fuse_ioctl_iovec {
-       __u64   base;
-       __u64   len;
+       uint64_t        base;
+       uint64_t        len;
 };
 
 struct fuse_ioctl_out {
-       __s32   result;
-       __u32   flags;
-       __u32   in_iovs;
-       __u32   out_iovs;
+       int32_t         result;
+       uint32_t        flags;
+       uint32_t        in_iovs;
+       uint32_t        out_iovs;
 };
 
 struct fuse_poll_in {
-       __u64   fh;
-       __u64   kh;
-       __u32   flags;
-       __u32   events;
+       uint64_t        fh;
+       uint64_t        kh;
+       uint32_t        flags;
+       uint32_t        events;
 };
 
 struct fuse_poll_out {
-       __u32   revents;
-       __u32   padding;
+       uint32_t        revents;
+       uint32_t        padding;
 };
 
 struct fuse_notify_poll_wakeup_out {
-       __u64   kh;
+       uint64_t        kh;
 };
 
 struct fuse_fallocate_in {
-       __u64   fh;
-       __u64   offset;
-       __u64   length;
-       __u32   mode;
-       __u32   padding;
+       uint64_t        fh;
+       uint64_t        offset;
+       uint64_t        length;
+       uint32_t        mode;
+       uint32_t        padding;
 };
 
 struct fuse_in_header {
-       __u32   len;
-       __u32   opcode;
-       __u64   unique;
-       __u64   nodeid;
-       __u32   uid;
-       __u32   gid;
-       __u32   pid;
-       __u32   padding;
+       uint32_t        len;
+       uint32_t        opcode;
+       uint64_t        unique;
+       uint64_t        nodeid;
+       uint32_t        uid;
+       uint32_t        gid;
+       uint32_t        pid;
+       uint32_t        padding;
 };
 
 struct fuse_out_header {
-       __u32   len;
-       __s32   error;
-       __u64   unique;
+       uint32_t        len;
+       int32_t         error;
+       uint64_t        unique;
 };
 
 struct fuse_dirent {
-       __u64   ino;
-       __u64   off;
-       __u32   namelen;
-       __u32   type;
+       uint64_t        ino;
+       uint64_t        off;
+       uint32_t        namelen;
+       uint32_t        type;
        char name[];
 };
 
 #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1))
+#define FUSE_DIRENT_ALIGN(x) \
+       (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
 #define FUSE_DIRENT_SIZE(d) \
        FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
 
@@ -685,47 +681,47 @@ struct fuse_direntplus {
        FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen)
 
 struct fuse_notify_inval_inode_out {
-       __u64   ino;
-       __s64   off;
-       __s64   len;
+       uint64_t        ino;
+       int64_t         off;
+       int64_t         len;
 };
 
 struct fuse_notify_inval_entry_out {
-       __u64   parent;
-       __u32   namelen;
-       __u32   padding;
+       uint64_t        parent;
+       uint32_t        namelen;
+       uint32_t        padding;
 };
 
 struct fuse_notify_delete_out {
-       __u64   parent;
-       __u64   child;
-       __u32   namelen;
-       __u32   padding;
+       uint64_t        parent;
+       uint64_t        child;
+       uint32_t        namelen;
+       uint32_t        padding;
 };
 
 struct fuse_notify_store_out {
-       __u64   nodeid;
-       __u64   offset;
-       __u32   size;
-       __u32   padding;
+       uint64_t        nodeid;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 struct fuse_notify_retrieve_out {
-       __u64   notify_unique;
-       __u64   nodeid;
-       __u64   offset;
-       __u32   size;
-       __u32   padding;
+       uint64_t        notify_unique;
+       uint64_t        nodeid;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        padding;
 };
 
 /* Matches the size of fuse_write_in */
 struct fuse_notify_retrieve_in {
-       __u64   dummy1;
-       __u64   offset;
-       __u32   size;
-       __u32   dummy2;
-       __u64   dummy3;
-       __u64   dummy4;
+       uint64_t        dummy1;
+       uint64_t        offset;
+       uint32_t        size;
+       uint32_t        dummy2;
+       uint64_t        dummy3;
+       uint64_t        dummy4;
 };
 
 #endif /* _LINUX_FUSE_H */
index 7e0962ed7f8ac850092323e56cbcb73a5a399a13..4d3124b392774322ae1b33f167a10c1046d886d9 100644 (file)
@@ -5331,7 +5331,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
 static int perf_swevent_init(struct perf_event *event)
 {
-       int event_id = event->attr.config;
+       u64 event_id = event->attr.config;
 
        if (event->attr.type != PERF_TYPE_SOFTWARE)
                return -ENOENT;
index cc47812d3feb0c86cbcf8f03e6ee81340ef7c57c..14be27feda491da1c3dc9990a5ae80ce649570aa 100644 (file)
@@ -63,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
                {
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;
 
-       raw_spin_lock_init(&cpu_base->lock);
-
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);
index bddd3d7a74b688a1c9c2e68481b77363558124c7..ffd4e111fd67e7d7b29199b6206957f6c458a350 100644 (file)
@@ -55,7 +55,7 @@ struct resource crashk_res = {
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
 };
 struct resource crashk_low_res = {
-       .name  = "Crash kernel low",
+       .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
@@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char                *cmdline,
        return 0;
 }
 
+#define SUFFIX_HIGH 0
+#define SUFFIX_LOW  1
+#define SUFFIX_NULL 2
+static __initdata char *suffix_tbl[] = {
+       [SUFFIX_HIGH] = ",high",
+       [SUFFIX_LOW]  = ",low",
+       [SUFFIX_NULL] = NULL,
+};
+
 /*
- * That function is the entry point for command line parsing and should be
- * called from the arch-specific code.
+ * That function parses "suffix"  crashkernel command lines like
+ *
+ *     crashkernel=size,[high|low]
+ *
+ * It returns 0 on success and -EINVAL on failure.
  */
+static int __init parse_crashkernel_suffix(char *cmdline,
+                                          unsigned long long   *crash_size,
+                                          unsigned long long   *crash_base,
+                                          const char *suffix)
+{
+       char *cur = cmdline;
+
+       *crash_size = memparse(cmdline, &cur);
+       if (cmdline == cur) {
+               pr_warn("crashkernel: memory value expected\n");
+               return -EINVAL;
+       }
+
+       /* check with suffix */
+       if (strncmp(cur, suffix, strlen(suffix))) {
+               pr_warn("crashkernel: unrecognized char\n");
+               return -EINVAL;
+       }
+       cur += strlen(suffix);
+       if (*cur != ' ' && *cur != '\0') {
+               pr_warn("crashkernel: unrecognized char\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static __init char *get_last_crashkernel(char *cmdline,
+                            const char *name,
+                            const char *suffix)
+{
+       char *p = cmdline, *ck_cmdline = NULL;
+
+       /* find crashkernel and use the last one if there are more */
+       p = strstr(p, name);
+       while (p) {
+               char *end_p = strchr(p, ' ');
+               char *q;
+
+               if (!end_p)
+                       end_p = p + strlen(p);
+
+               if (!suffix) {
+                       int i;
+
+                       /* skip the one with any known suffix */
+                       for (i = 0; suffix_tbl[i]; i++) {
+                               q = end_p - strlen(suffix_tbl[i]);
+                               if (!strncmp(q, suffix_tbl[i],
+                                            strlen(suffix_tbl[i])))
+                                       goto next;
+                       }
+                       ck_cmdline = p;
+               } else {
+                       q = end_p - strlen(suffix);
+                       if (!strncmp(q, suffix, strlen(suffix)))
+                               ck_cmdline = p;
+               }
+next:
+               p = strstr(p+1, name);
+       }
+
+       if (!ck_cmdline)
+               return NULL;
+
+       return ck_cmdline;
+}
+
 static int __init __parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base,
-                               const char *name)
+                            const char *name,
+                            const char *suffix)
 {
-       char    *p = cmdline, *ck_cmdline = NULL;
        char    *first_colon, *first_space;
+       char    *ck_cmdline;
 
        BUG_ON(!crash_size || !crash_base);
        *crash_size = 0;
        *crash_base = 0;
 
-       /* find crashkernel and use the last one if there are more */
-       p = strstr(p, name);
-       while (p) {
-               ck_cmdline = p;
-               p = strstr(p+1, name);
-       }
+       ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
 
        if (!ck_cmdline)
                return -EINVAL;
 
        ck_cmdline += strlen(name);
 
+       if (suffix)
+               return parse_crashkernel_suffix(ck_cmdline, crash_size,
+                               crash_base, suffix);
        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
@@ -1413,13 +1492,26 @@ static int __init __parse_crashkernel(char *cmdline,
        return 0;
 }
 
+/*
+ * That function is the entry point for command line parsing and should be
+ * called from the arch-specific code.
+ */
 int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
 {
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
-                                       "crashkernel=");
+                                       "crashkernel=", NULL);
+}
+
+int __init parse_crashkernel_high(char *cmdline,
+                            unsigned long long system_ram,
+                            unsigned long long *crash_size,
+                            unsigned long long *crash_base)
+{
+       return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
+                               "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
 }
 
 int __init parse_crashkernel_low(char *cmdline,
@@ -1428,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline,
                             unsigned long long *crash_base)
 {
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
-                                       "crashkernel_low=");
+                               "crashkernel=", suffix_tbl[SUFFIX_LOW]);
 }
 
 static void update_vmcoreinfo_note(void)
index e35be53f6613c4fd08c093b6865cd6f43e0b9a89..3fed7f0cbcdfe3d2149dd903e6912628ba58da95 100644 (file)
@@ -794,16 +794,16 @@ out:
 }
 
 #ifdef CONFIG_SYSCTL
-/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;
 
+       mutex_lock(&kprobe_mutex);
        /* If optimization is already allowed, just return */
        if (kprobes_allow_optimization)
-               return;
+               goto out;
 
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void)
                                optimize_kprobe(p);
        }
        printk(KERN_INFO "Kprobes globally optimized\n");
+out:
+       mutex_unlock(&kprobe_mutex);
 }
 
-/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;
 
+       mutex_lock(&kprobe_mutex);
        /* If optimization is already prohibited, just return */
-       if (!kprobes_allow_optimization)
+       if (!kprobes_allow_optimization) {
+               mutex_unlock(&kprobe_mutex);
                return;
+       }
 
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void)
                                unoptimize_kprobe(p, false);
                }
        }
+       mutex_unlock(&kprobe_mutex);
+
        /* Wait for unoptimizing completion */
        wait_for_kprobe_optimizer();
        printk(KERN_INFO "Kprobes globally unoptimized\n");
 }
 
+static DEFINE_MUTEX(kprobe_sysctl_mutex);
 int sysctl_kprobes_optimization;
 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
                                      void __user *buffer, size_t *length,
@@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 {
        int ret;
 
-       mutex_lock(&kprobe_mutex);
+       mutex_lock(&kprobe_sysctl_mutex);
        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 
@@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
                optimize_all_kprobes();
        else
                unoptimize_all_kprobes();
-       mutex_unlock(&kprobe_mutex);
+       mutex_unlock(&kprobe_sysctl_mutex);
 
        return ret;
 }
index 691dc2ef9baf241c121144799360a7e1237afcb5..9eb7fed0bbaa9895973a14e551c76de31fffe533 100644 (file)
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-       __set_current_state(TASK_INTERRUPTIBLE);
+       __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
-               __set_current_state(TASK_INTERRUPTIBLE);
+               __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+       /* Must have done schedule() in kthread() before we set_task_cpu */
+       if (!wait_task_inactive(p, state)) {
+               WARN_ON(1);
+               return;
+       }
        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-       /* Must have done schedule() in kthread() before we set_task_cpu */
-       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-               WARN_ON(1);
-               return;
-       }
-       __kthread_bind(p, cpu);
+       __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
        return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * We clear the IS_PARKED bit here as we don't wait
+        * until the task has left the park code. So if we'd
+        * park before that happens we'd see the IS_PARKED bit
+        * which might be about to be cleared.
+        */
+       if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+               if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+                       __kthread_bind(k, kthread->cpu, TASK_PARKED);
+               wake_up_state(k, TASK_PARKED);
+       }
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
        struct kthread *kthread = task_get_live_kthread(k);
 
-       if (kthread) {
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-               /*
-                * We clear the IS_PARKED bit here as we don't wait
-                * until the task has left the park code. So if we'd
-                * park before that happens we'd see the IS_PARKED bit
-                * which might be about to be cleared.
-                */
-               if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-                       if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-                               __kthread_bind(k, kthread->cpu);
-                       wake_up_process(k);
-               }
-       }
+       if (kthread)
+               __kthread_unpark(k, kthread);
        put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
        trace_sched_kthread_stop(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+               __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
index dd72567767d963ccb8ac2f3cfa4a824fa5f81b4e..598dc06be4214c7a11fbf128baff984fa9c1c4b5 100644 (file)
@@ -2948,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 
 static int do_tkill(pid_t tgid, pid_t pid, int sig)
 {
-       struct siginfo info;
+       struct siginfo info = {};
 
        info.si_signo = sig;
        info.si_errno = 0;
index 8eaed9aa9cf0c1995520605af1de8ef3b9e95485..02fc5c9336735a834378dbbb6fff8e57b919b0fa 100644 (file)
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
-       if (ht->create)
-               ht->create(cpu);
+       if (ht->create) {
+               /*
+                * Make sure that the task has actually scheduled out
+                * into park position, before calling the create
+                * callback. At least the migration thread callback
+                * requires that the task is off the runqueue.
+                */
+               if (!wait_task_inactive(tsk, TASK_PARKED))
+                       WARN_ON(1);
+               else
+                       ht->create(cpu);
+       }
        return 0;
 }
 
index 9e5b8c272eecc9121e8d3e064f30220c71c7e957..5a0f781cd729870892d105f24cc552a9d28b23e6 100644 (file)
@@ -739,12 +739,6 @@ static void blk_add_trace_rq_complete(void *ignore,
                                      struct request_queue *q,
                                      struct request *rq)
 {
-       struct blk_trace *bt = q->blk_trace;
-
-       /* if control ever passes through here, it's a request based driver */
-       if (unlikely(bt && !bt->rq_based))
-               bt->rq_based = true;
-
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 }
 
@@ -780,24 +774,10 @@ static void blk_add_trace_bio_bounce(void *ignore,
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
-static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
+static void blk_add_trace_bio_complete(void *ignore,
+                                      struct request_queue *q, struct bio *bio,
+                                      int error)
 {
-       struct request_queue *q;
-       struct blk_trace *bt;
-
-       if (!bio->bi_bdev)
-               return;
-
-       q = bdev_get_queue(bio->bi_bdev);
-       bt = q->blk_trace;
-
-       /*
-        * Request based drivers will generate both rq and bio completions.
-        * Ignore bio ones.
-        */
-       if (likely(!bt) || bt->rq_based)
-               return;
-
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
index a54f26f82eb250a60c7f24ecc651c6c1bbf8cc88..e134d8f365ddea425ac6a8df541875038928298d 100644 (file)
@@ -25,7 +25,8 @@
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
-static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(const struct file *file,
+                               struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *map);
 
 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
@@ -612,10 +613,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        if (map->nr_extents != 0)
                goto out;
 
-       /* Require the appropriate privilege CAP_SETUID or CAP_SETGID
-        * over the user namespace in order to set the id mapping.
+       /*
+        * Adjusting namespace settings requires capabilities on the target.
         */
-       if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
+       if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
                goto out;
 
        /* Get a buffer */
@@ -700,7 +701,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 
        ret = -EPERM;
        /* Validate the user is allowed to use user id's mapped to. */
-       if (!new_idmap_permitted(ns, cap_setid, &new_map))
+       if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;
 
        /* Map the lower ids from the parent user namespace to the
@@ -787,7 +788,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
                         &ns->projid_map, &ns->parent->projid_map);
 }
 
-static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(const struct file *file, 
+                               struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
 {
        /* Allow mapping to your own filesystem ids */
@@ -795,12 +797,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
                u32 id = new_map->extent[0].lower_first;
                if (cap_setid == CAP_SETUID) {
                        kuid_t uid = make_kuid(ns->parent, id);
-                       if (uid_eq(uid, current_fsuid()))
+                       if (uid_eq(uid, file->f_cred->fsuid))
                                return true;
                }
                else if (cap_setid == CAP_SETGID) {
                        kgid_t gid = make_kgid(ns->parent, id);
-                       if (gid_eq(gid, current_fsgid()))
+                       if (gid_eq(gid, file->f_cred->fsgid))
                                return true;
                }
        }
@@ -811,8 +813,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
 
        /* Allow the specified ids if we have the appropriate capability
         * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
+        * And the opener of the id file also had the approprpiate capability.
         */
-       if (ns_capable(ns->parent, cap_setid))
+       if (ns_capable(ns->parent, cap_setid) &&
+           file_ns_capable(file, ns->parent, cap_setid))
                return true;
 
        return false;
index 3958dc4389f993cc204670706e834c42be80165b..fe01d418b09ae4d13a15b59039ef27602d1ad0f3 100644 (file)
@@ -404,4 +404,7 @@ config OID_REGISTRY
        help
          Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+        tristate
+
 endmenu
index d7946ff75b2ea0f4baca2716aba3f41577f9b904..6e2cc561f761af79f2d555ca688bbcfe676de602 100644 (file)
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN     $@
       cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
 
 clean-files    += oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
index bfe02b8fc55b3d1c383832148e4e5aa44e0ef87c..d23762e6652c1e02bf87f070d3164afa93dc91a8 100644 (file)
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
 
-       return 1;
+       return 0;
 }
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
        return io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+       unsigned long size;
+
+       size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 void  __init
 swiotlb_init(int verbose)
 {
-       /* default to 64MB */
-       size_t default_size = 64UL<<20;
+       size_t default_size = IO_TLB_DEFAULT_SIZE;
        unsigned char *vstart;
        unsigned long bytes;
 
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644 (file)
index 0000000..6f500ef
--- /dev/null
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+        unsigned long length = 0;
+
+        while (*s++ != 0 && length < maxlength)
+                length++;
+        return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+        return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+        return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+        while (1) {
+                if (len == 0)
+                        return 0;
+                if (*a < *b)
+                        return -1;
+                if (*a > *b)
+                        return 1;
+                if (*a == 0) /* implies *b == 0 */
+                        return 0;
+                a++;
+                b++;
+                len--;
+        }
+}
+EXPORT_SYMBOL(ucs2_strncmp);
index ca9a7c6d7e973c5db4bbecd7e72139d5c9529b72..1a12f5b9a0aba45396fccc46d14d2b6534c30263 100644 (file)
@@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        break;
                }
 
-               if (absent ||
+               /*
+                * We need call hugetlb_fault for both hugepages under migration
+                * (in which case hugetlb_fault waits for the migration,) and
+                * hwpoisoned hugepages (in which case we need to prevent the
+                * caller from accessing to them.) In order to do this, we use
+                * here is_swap_pte instead of is_hugetlb_entry_migration and
+                * is_hugetlb_entry_hwpoisoned. This is because it simply covers
+                * both cases, and because we can't follow correct pages
+                * directly from any kind of swap entries.
+                */
+               if (absent || is_swap_pte(huge_ptep_get(pte)) ||
                    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
index 13cbc420fead06d96f82b437a67d03b6ff9d5fca..ba94dec5b25900e47093544e11f8c765bf243f9d 100644 (file)
@@ -2393,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * whatever write-combining details or similar.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+       unsigned long vm_len, pfn, pages;
+
+       /* Check that the physical memory area passed in looks valid */
+       if (start + len < start)
+               return -EINVAL;
+       /*
+        * You *really* shouldn't map things that aren't page-aligned,
+        * but we've historically allowed it because IO memory might
+        * just have smaller alignment.
+        */
+       len += start & ~PAGE_MASK;
+       pfn = start >> PAGE_SHIFT;
+       pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+       if (pfn + pages < pfn)
+               return -EINVAL;
+
+       /* We start the mapping 'vm_pgoff' pages into the area */
+       if (vma->vm_pgoff > pages)
+               return -EINVAL;
+       pfn += vma->vm_pgoff;
+       pages -= vma->vm_pgoff;
+
+       /* Can we fit all of the mapping? */
+       vm_len = vma->vm_end - vma->vm_start;
+       if (vm_len >> PAGE_SHIFT > pages)
+               return -EINVAL;
+
+       /* Ok, let it rip */
+       return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long addr, unsigned long end,
                                     pte_fn_t fn, void *data)
index 0db0de1c2fbee21e0e9919af66021b190d789731..033094ba62dce349a03345a4efe3a15b9bbe328e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2305,7 +2305,7 @@ static void unmap_region(struct mm_struct *mm,
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-                                next ? next->vm_start : 0);
+                                next ? next->vm_start : USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, start, end);
 }
 
@@ -2685,7 +2685,7 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
 
-       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, 0, -1);
 
        /*
index 88c5fed8b9a4bd9a54021810cf05710cff4fcbfc..669fba39be1aa41bb9adfd040c709e43b194cfe2 100644 (file)
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid)
        if (IS_ERR(pgdat->kswapd)) {
                /* failure at boot is fatal */
                BUG_ON(system_state == SYSTEM_BOOTING);
-               pgdat->kswapd = NULL;
                pr_err("Failed to start kswapd on node %d\n", nid);
                ret = PTR_ERR(pgdat->kswapd);
+               pgdat->kswapd = NULL;
        }
        return ret;
 }
index a4cc3229952a17d387411cded9bc66d940b9929f..e085bcc754f602e4401e51c8c29442a1bd143de8 100644 (file)
@@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
         * all pending messages before the applicant is gone.
         */
        del_timer_sync(&app->join_timer);
+
+       spin_lock(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
        mrp_pdu_queue(app);
+       spin_unlock(&app->lock);
+
        mrp_queue_xmit(app);
 
        dev_mc_del(dev, appl->group_address);
index 0488d70c8c3512bef0311f1611d0a4a362b9057e..fa563e497c4861a3fd1656a91887eed47e81ec81 100644 (file)
@@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 }
 
-int batadv_is_my_mac(const uint8_t *addr)
+int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
 {
        const struct batadv_hard_iface *hard_iface;
 
@@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr)
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;
 
+               if (hard_iface->soft_iface != bat_priv->soft_iface)
+                       continue;
+
                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
index ced08b936a9690a6f7d01af7f7eb8a31f3e1f29d..d40910dfc8ea5a20fbe2c839f59bd65018bf1c28 100644 (file)
@@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-int batadv_is_my_mac(const uint8_t *addr);
+int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
index 5ee21cebbbb0953fd7708b84761f379d8a1423e5..319f2906c71a2146328c1e6ad1c4e9cbedee1e53 100644 (file)
@@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                goto out;
 
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                goto out;
 
        icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
@@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        }
 
        /* packet for me */
-       if (batadv_is_my_mac(icmp_packet->dst))
+       if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
                return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
 
        /* TTL exceeded */
@@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
        return router;
 }
 
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
+static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
+                                      struct sk_buff *skb, int hdr_size)
 {
        struct ethhdr *ethhdr;
 
@@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
                return -1;
 
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                return -1;
 
        return 0;
@@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
        char tt_flag;
        size_t packet_size;
 
-       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
 
        /* I could need to modify it */
@@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
        case BATADV_TT_RESPONSE:
                batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
 
-               if (batadv_is_my_mac(tt_query->dst)) {
+               if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
                        /* packet needs to be linearized to access the TT
                         * changes
                         */
@@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
        struct batadv_roam_adv_packet *roam_adv_packet;
        struct batadv_orig_node *orig_node;
 
-       if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb,
+                                       sizeof(*roam_adv_packet)) < 0)
                goto out;
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
 
        roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
 
-       if (!batadv_is_my_mac(roam_adv_packet->dst))
+       if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
                return batadv_route_unicast_packet(skb, recv_if);
 
        /* check if it is a backbone gateway. we don't accept
@@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * last time) the packet had an updated information or not
         */
        curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-       if (!batadv_is_my_mac(unicast_packet->dest)) {
+       if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                orig_node = batadv_orig_hash_find(bat_priv,
                                                  unicast_packet->dest);
                /* if it is not possible to find the orig_node representing the
@@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
        if (is4addr)
                hdr_size = sizeof(*unicast_4addr_packet);
 
-       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
 
        if (!batadv_check_unicast_ttvn(bat_priv, skb))
                return NET_RX_DROP;
 
        /* packet for me */
-       if (batadv_is_my_mac(unicast_packet->dest)) {
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                if (is4addr) {
                        batadv_dat_inc_counter(bat_priv,
                                               unicast_4addr_packet->subtype);
@@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
        struct sk_buff *new_skb = NULL;
        int ret;
 
-       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
 
        if (!batadv_check_unicast_ttvn(bat_priv, skb))
@@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
        unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
 
        /* packet for me */
-       if (batadv_is_my_mac(unicast_packet->dest)) {
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
 
                if (ret == NET_RX_DROP)
@@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
                goto out;
 
        /* ignore broadcasts sent by myself */
-       if (batadv_is_my_mac(ethhdr->h_source))
+       if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
                goto out;
 
        bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
        /* ignore broadcasts originated by myself */
-       if (batadv_is_my_mac(bcast_packet->orig))
+       if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
                goto out;
 
        if (bcast_packet->header.ttl < 2)
@@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb,
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                return NET_RX_DROP;
 
        /* ignore own packets */
-       if (batadv_is_my_mac(vis_packet->vis_orig))
+       if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
                return NET_RX_DROP;
 
-       if (batadv_is_my_mac(vis_packet->sender_orig))
+       if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
                return NET_RX_DROP;
 
        switch (vis_packet->vis_type) {
index 98a66a021a602678d515df58199895bceef31a92..7abee19567e9e0f84ecb224e666f2e6c1094333c 100644 (file)
@@ -1953,7 +1953,7 @@ out:
 bool batadv_send_tt_response(struct batadv_priv *bat_priv,
                             struct batadv_tt_query_packet *tt_request)
 {
-       if (batadv_is_my_mac(tt_request->dst)) {
+       if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
                /* don't answer backbone gws! */
                if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
                        return true;
index c053244b97bd38e03a588c1c629e6081d06b2154..6a1e646be96d45b21e21127a056a9aad3af91bb1 100644 (file)
@@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
 
        /* Are we the target for this VIS packet? */
        if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC   &&
-           batadv_is_my_mac(vis_packet->target_orig))
+           batadv_is_my_mac(bat_priv, vis_packet->target_orig))
                are_target = 1;
 
        spin_lock_bh(&bat_priv->vis.hash_lock);
@@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
                batadv_send_list_add(bat_priv, info);
 
                /* ... we're not the recipient (and thus need to forward). */
-       } else if (!batadv_is_my_mac(packet->target_orig)) {
+       } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
                batadv_send_list_add(bat_priv, info);
        }
 
index ef1b91431c6bb1e0627119f877eca0d2339a899c..459dab22b3f6009dc027c01e29fe587270291053 100644 (file)
@@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p)
        struct net_device *dev = p->dev;
        struct net_bridge *br = p->br;
 
-       if (netif_running(dev) && netif_oper_up(dev))
+       if (!(p->flags & BR_ADMIN_COST) &&
+           netif_running(dev) && netif_oper_up(dev))
                p->path_cost = port_cost(dev);
 
        if (!netif_running(br->dev))
index 3cbf5beb3d4be267fbea19a2fd81b7b09d3d6a0c..d2c043a857b6a0fd3bfd208f018179b96c42dacd 100644 (file)
@@ -156,6 +156,7 @@ struct net_bridge_port
 #define BR_BPDU_GUARD           0x00000002
 #define BR_ROOT_BLOCK          0x00000004
 #define BR_MULTICAST_FAST_LEAVE        0x00000008
+#define BR_ADMIN_COST          0x00000010
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        u32                             multicast_startup_queries_sent;
index 0bdb4ebd362b7818229bb50f13d25f7e1181dcad..d45e760141bb81a34909b98724b2d8f3fc876136 100644 (file)
@@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
            path_cost > BR_MAX_PATH_COST)
                return -ERANGE;
 
+       p->flags |= BR_ADMIN_COST;
        p->path_cost = path_cost;
        br_configuration_update(p->br);
        br_port_state_selection(p->br);
index e7d68ed8aafe60f75233fcdc1ed68c39e755cda6..b24ab0e98eb4d8dd8cef2a20ef2a39b97d9ece92 100644 (file)
@@ -2148,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
        struct net_device *dev = skb->dev;
        const char *driver = "";
 
+       if (!net_ratelimit())
+               return;
+
        if (dev && dev->dev.parent)
                driver = dev_driver_string(dev->dev.parent);
 
index 3b4f0cd2e63edbd136683577873b288712a4b92a..4cfe34d4cc967a94ed15f2deef3d249d2429e846 100644 (file)
@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
        /* skb is pure payload to encrypt */
 
-       err = -ENOMEM;
-
        esp = x->data;
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);
@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-       if (!tmp)
+       if (!tmp) {
+               err = -ENOMEM;
                goto error;
+       }
 
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
index a6445b843ef40774f2bf5014e6800e1f4fd86b3e..52c273ea05c34c902e07c00609b4f881c392c37b 100644 (file)
@@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg)
                if (!head->dev)
                        goto out_rcu_unlock;
 
-               /* skb dst is stale, drop it, and perform route lookup again */
-               skb_dst_drop(head);
+               /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
@@ -523,9 +522,16 @@ found:
                qp->q.max_size = skb->len + ihl;
 
        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           qp->q.meat == qp->q.len)
-               return ip_frag_reasm(qp, prev, dev);
+           qp->q.meat == qp->q.len) {
+               unsigned long orefdst = skb->_skb_refdst;
 
+               skb->_skb_refdst = 0UL;
+               err = ip_frag_reasm(qp, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return err;
+       }
+
+       skb_dst_drop(skb);
        inet_frag_lru_move(&qp->q);
        return -EINPROGRESS;
 
index c30130062cd6515f31d7497eaa6a403d2b1d629d..c49dcd0284a06c6bb4b4e6787af5888289ba50f3 100644 (file)
@@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
        return dev_match;
 }
 
+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+       const struct rtable *rt = skb_rtable(skb);
+       return rt && (rt->rt_flags & RTCF_LOCAL);
+}
+
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_rpfilter_info *info;
@@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        info = par->matchinfo;
        invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (par->in->flags & IFF_LOOPBACK)
+       if (rpfilter_is_local(skb))
                return true ^ invert;
 
        iph = ip_hdr(skb);
index ef54377fb11cbdd3f327fa29c45c623923b8cb2f..397e0f69435fd1b1b4b3775cadc409d67ea63332 100644 (file)
@@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-                          RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+                          RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
                           ireq->loc_addr, th->source, th->dest);
index 3bd55bad230ac7f822f4ea75eb280bc8ce0c07fc..13b9c08fc1582531aa34eef92a432dc96f2a1c52 100644 (file)
@@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2;
 #define FLAG_DSACKING_ACK      0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED     0x1000 /* Non-head rexmitted data was ACKed */
 #define FLAG_SACK_RENEGING     0x2000 /* snd_una advanced to a sacked seq */
+#define FLAG_UPDATE_TS_RECENT  0x4000 /* tcp_replace_ts_recent() */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3564,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
        }
 }
 
+static void tcp_store_ts_recent(struct tcp_sock *tp)
+{
+       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
+}
+
+static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+{
+       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+                * extra check below makes sure this can only happen
+                * for pure ACK frames.  -DaveM
+                *
+                * Not only, also it occurs for expired timestamps.
+                */
+
+               if (tcp_paws_check(&tp->rx_opt, 0))
+                       tcp_store_ts_recent(tp);
+       }
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3607,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        prior_fackets = tp->fackets_out;
        prior_in_flight = tcp_packets_in_flight(tp);
 
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       if (flag & FLAG_UPDATE_TS_RECENT)
+               tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
        if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
                 * No more checks are required.
@@ -3927,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
 EXPORT_SYMBOL(tcp_parse_md5sig_option);
 #endif
 
-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
-{
-       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = get_seconds();
-}
-
-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
-{
-       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
-               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
-                * extra check below makes sure this can only happen
-                * for pure ACK frames.  -DaveM
-                *
-                * Not only, also it occurs for expired timestamps.
-                */
-
-               if (tcp_paws_check(&tp->rx_opt, 0))
-                       tcp_store_ts_recent(tp);
-       }
-}
-
 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
  *
  * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
@@ -5543,14 +5550,9 @@ slow_path:
                return 0;
 
 step5:
-       if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+       if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
                goto discard;
 
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
        tcp_rcv_rtt_measure_ts(sk, skb);
 
        /* Process urgent data. */
@@ -5986,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* step 5: check the ACK field */
        if (true) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+                                                 FLAG_UPDATE_TS_RECENT) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -6137,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                }
        }
 
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
        /* step 6: check the URG bit */
        tcp_urg(sk, skb, th);
 
index b44cf81d817858737f81e880ac368f07d6ca48de..509912a5ff98e73edfe9d60295de196ed16874a2 100644 (file)
@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-       /* make sure skb->data is aligned on arches that require it */
-       if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+       /* make sure skb->data is aligned on arches that require it
+        * and check if ack-trimming & collapsing extended the headroom
+        * beyond what csum_start can cover.
+        */
+       if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+                    skb_headroom(skb) >= 0xFFFF)) {
                struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                   GFP_ATOMIC);
                return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
index a459c4f5b76914e031798a4b90b3df1998f47a08..dae802c0af7c002df341d3dfe4ed94db6584118e 100644 (file)
@@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
                               struct net_device *dev);
 
-static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
-
 static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .forwarding             = 0,
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
@@ -837,7 +835,7 @@ out2:
        rcu_read_unlock_bh();
 
        if (likely(err == 0))
-               atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+               inet6addr_notifier_call_chain(NETDEV_UP, ifa);
        else {
                kfree(ifa);
                ifa = ERR_PTR(err);
@@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        ipv6_ifa_notify(RTM_DELADDR, ifp);
 
-       atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
+       inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
 
        /*
         * Purge or update corresponding prefix
@@ -2988,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
-                       atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+                       inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
                }
                in6_ifa_put(ifa);
 
@@ -4869,22 +4867,6 @@ static struct pernet_operations addrconf_ops = {
        .exit = addrconf_exit_net,
 };
 
-/*
- *      Device notifier
- */
-
-int register_inet6addr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(register_inet6addr_notifier);
-
-int unregister_inet6addr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(unregister_inet6addr_notifier);
-
 static struct rtnl_af_ops inet6_ops = {
        .family           = AF_INET6,
        .fill_link_af     = inet6_fill_link_af,
index d051e5f4bf348092e8062b930b56881669974a95..72104562c86481311857359882669527359a7f6c 100644 (file)
@@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr)
 }
 EXPORT_SYMBOL(__ipv6_addr_type);
 
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
+
+int register_inet6addr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(register_inet6addr_notifier);
+
+int unregister_inet6addr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(unregister_inet6addr_notifier);
+
+int inet6addr_notifier_call_chain(unsigned long val, void *v)
+{
+       return atomic_notifier_call_chain(&inet6addr_chain, val, v);
+}
+EXPORT_SYMBOL(inet6addr_notifier_call_chain);
index 5060d54199abbc180d7dc87ba072bace2bc167f7..e0983f3648a628410c6f6bfd9549ec339a325353 100644 (file)
@@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
        return ret;
 }
 
+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+       const struct rt6_info *rt = (const void *) skb_dst(skb);
+       return rt && (rt->rt6i_flags & RTF_LOCAL);
+}
+
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_rpfilter_info *info = par->matchinfo;
@@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct ipv6hdr *iph;
        bool invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (par->in->flags & IFF_LOOPBACK)
+       if (rpfilter_is_local(skb))
                return true ^ invert;
 
        iph = ipv6_hdr(skb);
index 196ab9347ad1df2bab5a9c98720a51e81773985c..0ba10e53a6298f07cb64193681ccbfb419d40f36 100644 (file)
@@ -330,9 +330,17 @@ found:
        }
 
        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len)
-               return ip6_frag_reasm(fq, prev, dev);
+           fq->q.meat == fq->q.len) {
+               int res;
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               res = ip6_frag_reasm(fq, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return res;
+       }
 
+       skb_dst_drop(skb);
        inet_frag_lru_move(&fq->q);
        return -1;
 
index 29340a9a6fb9937f9bc848ffda5c49c51cca47ee..e1b37f5a2691f7a79052f9b840aa0f9910182698 100644 (file)
@@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap,
 {
        struct iriap_cb *self;
 
-       IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
+       IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__,
+                  irlmp_reason_str(reason), reason);
 
        self = instance;
 
index 6115a44c0a24f9ba1cb3248de38b8579118e5dbf..1064621da6f6c297ee7deaa439e9569d9f37cf73 100644 (file)
@@ -66,8 +66,15 @@ const char *irlmp_reasons[] = {
        "LM_LAP_RESET",
        "LM_INIT_DISCONNECT",
        "ERROR, NOT USED",
+       "UNKNOWN",
 };
 
+const char *irlmp_reason_str(LM_REASON reason)
+{
+       reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1);
+       return irlmp_reasons[reason];
+}
+
 /*
  * Function irlmp_init (void)
  *
@@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
 {
        struct lsap_cb *lsap;
 
-       IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
+       IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__,
+                  irlmp_reason_str(reason), reason);
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
 
index 58150f877ec3fe83f29adb2df4caf5bb617e3358..9ed49ad0380f151fc5ad5448e953f6497d800291 100644 (file)
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
-u32 ieee80211_idle_off(struct ieee80211_local *local)
+static u32 __ieee80211_idle_off(struct ieee80211_local *local)
 {
        if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
                return 0;
@@ -87,7 +87,7 @@ u32 ieee80211_idle_off(struct ieee80211_local *local)
        return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-static u32 ieee80211_idle_on(struct ieee80211_local *local)
+static u32 __ieee80211_idle_on(struct ieee80211_local *local)
 {
        if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
                return 0;
@@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
        return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-void ieee80211_recalc_idle(struct ieee80211_local *local)
+static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
+                                  bool force_active)
 {
        bool working = false, scanning, active;
        unsigned int led_trig_start = 0, led_trig_stop = 0;
        struct ieee80211_roc_work *roc;
-       u32 change;
 
        lockdep_assert_held(&local->mtx);
 
-       active = !list_empty(&local->chanctx_list) || local->monitors;
+       active = force_active ||
+                !list_empty(&local->chanctx_list) ||
+                local->monitors;
 
        if (!local->ops->remain_on_channel) {
                list_for_each_entry(roc, &local->roc_list, list) {
@@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
        ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
 
        if (working || scanning || active)
-               change = ieee80211_idle_off(local);
-       else
-               change = ieee80211_idle_on(local);
+               return __ieee80211_idle_off(local);
+       return __ieee80211_idle_on(local);
+}
+
+u32 ieee80211_idle_off(struct ieee80211_local *local)
+{
+       return __ieee80211_recalc_idle(local, true);
+}
+
+void ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+       u32 change = __ieee80211_recalc_idle(local, false);
        if (change)
                ieee80211_hw_config(local, change);
 }
index 82cc30318a86f66c4a8f23341ea6ed3f6b62d578..346ad4cfb01323cf471758dd5a6fcbdb12b338a6 100644 (file)
@@ -3964,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        /* prep auth_data so we don't go into idle on disassoc */
        ifmgd->auth_data = auth_data;
 
-       if (ifmgd->associated)
-               ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+       if (ifmgd->associated) {
+               u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+               ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+                                      WLAN_REASON_UNSPECIFIED,
+                                      false, frame_buf);
+
+               __cfg80211_send_deauth(sdata->dev, frame_buf,
+                                      sizeof(frame_buf));
+       }
 
        sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
 
@@ -4025,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        mutex_lock(&ifmgd->mtx);
 
-       if (ifmgd->associated)
-               ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+       if (ifmgd->associated) {
+               u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+               ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+                                      WLAN_REASON_UNSPECIFIED,
+                                      false, frame_buf);
+
+               __cfg80211_send_deauth(sdata->dev, frame_buf,
+                                      sizeof(frame_buf));
+       }
 
        if (ifmgd->auth_data && !ifmgd->auth_data->done) {
                err = -EBUSY;
index 0f92dc24cb894bfa0674c2b3f6dca970d7615c64..d7df6ac2c6f10a1bf14be997a981f577e5364b98 100644 (file)
@@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,
 nla_put_failure:
        nla_nest_cancel(skb, nested);
        ipset_nest_end(skb, atd);
-       return -EMSGSIZE;
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
 }
 
 static int
index f2627226a0870b22612fe17397c228ff7ef628b0..10a30b4fc7dba7af7ad858c566791e982475436a 100644 (file)
@@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
+}
+
 static inline int
 hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
 {
@@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
+}
+
 static inline int
 hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
 {
index 4b677cf6bf7d7899f7295fea5eeab54df84699c6..d6a59154d7104258d18335a39646fbf9e3b8f598 100644 (file)
@@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
 static inline void
 hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
 {
-       dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+       dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
 }
 
 static inline int
@@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
 static inline void
 hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
 {
-       dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+       dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
 }
 
 static inline int
index 6ba985f1c96ff5ef43bf0ad599e9792bd64f54ef..f2b0a3c30130ba66bc69fe24b39cb54cdbc0c77a 100644 (file)
@@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
 static inline void
 hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
 {
-       dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+       dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
 }
 
 static inline int
@@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
 static inline void
 hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
 {
-       dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+       dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
 static inline int
@@ -503,6 +512,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
        return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
+static inline void
+hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
+}
+
 static inline void
 hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
 {
index af20c0c5ced2e9204df65ddaf37424a9b589be99..349deb672a2d4fd8303ab43bdb3f0207c9124e0d 100644 (file)
@@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
+}
+
 static inline int
 hash_netport4_data_match(const struct hash_netport4_elem *elem)
 {
@@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags)
+{
+       if (dst->nomatch) {
+               *flags = IPSET_FLAG_NOMATCH;
+               dst->nomatch = 0;
+       }
+}
+
 static inline int
 hash_netport6_data_match(const struct hash_netport6_elem *elem)
 {
index 8371c2bac2e4240eb5c4b3f6abd0faa48f6212ce..09c744aa89829cb7f99878974ef2471364135f27 100644 (file)
@@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 {
        const struct set_elem *e = list_set_elem(map, i);
 
-       if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
-               /* Last element replaced: e.g. add new,before,last */
-               ip_set_put_byindex(e->id);
+       if (e->id != IPSET_INVALID_ID) {
+               const struct set_elem *x = list_set_elem(map, map->size - 1);
+
+               /* Last element replaced or pushed off */
+               if (x->id != IPSET_INVALID_ID)
+                       ip_set_put_byindex(x->id);
+       }
        if (with_timeout(map->timeout))
                list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
        else
index 0e7d423324c399b3c5012dc312fd75f470b78cb5..e0c4373b47478d4d72899d95166566db34a8bcfa 100644 (file)
@@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
                end += strlen("\r\n\r\n") + clen;
 
                msglen = origlen = end - dptr;
-               if (msglen > datalen) {
-                       nf_ct_helper_log(skb, ct, "incomplete/bad SIP message");
-                       return NF_DROP;
-               }
+               if (msglen > datalen)
+                       return NF_ACCEPT;
 
                ret = process_sip_msg(skb, ct, protoff, dataoff,
                                      &dptr, &msglen);
index 8d5769c6d16e505ed6239889faee03ab34d4c15d..ad24be070e53c0fe8c27e49a6e2cb93a39c32062 100644 (file)
@@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
 struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
-       bool    hash;
 };
 
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+/* kill conntracks with affected NAT section */
+static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 {
        const struct nf_nat_proto_clean *clean = data;
        struct nf_conn_nat *nat = nfct_nat(i);
 
        if (!nat)
                return 0;
-       if (!(i->status & IPS_SRC_NAT_DONE))
-               return 0;
+
        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;
 
-       if (clean->hash) {
-               spin_lock_bh(&nf_nat_lock);
-               hlist_del_rcu(&nat->bysource);
-               spin_unlock_bh(&nf_nat_lock);
-       } else {
-               memset(nat, 0, sizeof(*nat));
-               i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
-                              IPS_SEQ_ADJUST);
-       }
-       return 0;
+       return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
        struct net *net;
 
        rtnl_lock();
-       /* Step 1 - remove from bysource hash */
-       clean.hash = true;
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-       synchronize_rcu();
-
-       /* Step 2 - clean NAT section */
-       clean.hash = false;
-       for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
        rtnl_unlock();
 }
 
@@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)
        struct net *net;
 
        rtnl_lock();
-       /* Step 1 - remove from bysource hash */
-       clean.hash = true;
-       for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-       synchronize_rcu();
 
-       /* Step 2 - clean NAT section */
-       clean.hash = false;
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
        rtnl_unlock();
 }
 
@@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
+       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
index a4b724708a1ab0b7d4aedffc07606b8fbfdac0a9..6980c3e6f0667b0ed22098098b66da7cf929a47e 100644 (file)
@@ -1593,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                return ERR_PTR(-ENOMEM);
 
        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
-       if (retval < 0) {
-               kfree_skb(skb);
-               return ERR_PTR(retval);
-       }
+       BUG_ON(retval < 0);
+
        return skb;
 }
 
@@ -1726,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;
 
+       reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!reply) {
+               err = -ENOMEM;
+               goto exit_unlock;
+       }
+
        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (err)
-               goto exit_unlock;
+               goto exit_free;
+
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-                                        OVS_VPORT_CMD_NEW);
-       if (IS_ERR(reply)) {
-               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-                               ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
-               goto exit_unlock;
-       }
+       err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+                                     info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+       BUG_ON(err < 0);
 
        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
+       rtnl_unlock();
+       return 0;
+
+exit_free:
+       kfree_skb(reply);
 exit_unlock:
        rtnl_unlock();
        return err;
index fe0e4215c73d6045469ba006722ef223d43ebd66..67a2b783fe70257cb6c33542806d3f3ca17f3d76 100644 (file)
@@ -795,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
 
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
+       BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
-       BUG_ON(table->count < 0);
 }
 
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
index 1135d8227f9bf1811a2c45c211b80e6b9a69706f..9b97172db84a7c72c31c2b6bd36141149859eb1c 100644 (file)
@@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
        if (err < 0)
                return err;
 
-       err = -EINVAL;
        if (tb[TCA_FW_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
@@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
        }
 #endif /* CONFIG_NET_CLS_IND */
 
+       err = -EINVAL;
        if (tb[TCA_FW_MASK]) {
                mask = nla_get_u32(tb[TCA_FW_MASK]);
                if (mask != head->mask)
index b28cc384a5bcf30952f66d7da5b349978e39b49a..4de4bc48493bca4947182bc10483bc272f459331 100755 (executable)
@@ -3016,6 +3016,7 @@ sub process {
                            $dstat !~ /^'X'$/ &&                                        # character constants
                            $dstat !~ /$exceptions/ &&
                            $dstat !~ /^\.$Ident\s*=/ &&                                # .foo =
+                           $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ &&          # stringification #foo
                            $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&       # do {...} while (...); // do {...} while (...)
                            $dstat !~ /^for\s*$Constant$/ &&                            # for (...)
                            $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&   # for (...) bar()
index 71ae86ca64ac3b696121c66cbc7b334bc03cb3f3..eb560fa32321eff55002830cf78f2764d7a09518 100644 (file)
@@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
                           struct vm_area_struct *area)
 {
-       long size;
-       unsigned long offset;
+       struct snd_pcm_runtime *runtime = substream->runtime;;
 
        area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-       area->vm_flags |= VM_IO;
-       size = area->vm_end - area->vm_start;
-       offset = area->vm_pgoff << PAGE_SHIFT;
-       if (io_remap_pfn_range(area, area->vm_start,
-                               (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
-                               size, area->vm_page_prot))
-               return -EAGAIN;
-       return 0;
+       return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
 }
 
 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
index 6f3214ed4444ecb14c33cc29c3119057bf812cf5..321e066a07533bcf7a18e72c471e01cc1bef9527 100644 (file)
@@ -1421,6 +1421,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x3C:      /* HSW */
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
+       case 0x46:      /* HSW */
                return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
@@ -1515,6 +1516,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3C:      /* HSW */
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
+       case 0x46:      /* HSW */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
                break;
        case 0x2D:
@@ -1754,6 +1756,7 @@ int is_snb(unsigned int family, unsigned int model)
        case 0x3C:      /* HSW */
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
+       case 0x46:      /* HSW */
                return 1;
        }
        return 0;
@@ -2276,7 +2279,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.2 February 11, 2013"
+               fprintf(stderr, "turbostat v3.3 March 15, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();