+++ /dev/null
-Device-Tree bindings for drm hdmi driver
-
-Required properties:
-- compatible: value should be "samsung,exynos5-hdmi".
-- reg: physical base address of the hdmi and length of memory mapped
- region.
-- interrupts: interrupt number to the cpu.
-- hpd-gpio: following information about the hotplug gpio pin.
- a) phandle of the gpio controller node.
- b) pin number within the gpio controller.
- c) pin function mode.
- d) optional flags and pull up/down.
- e) drive strength.
-
-Example:
-
- hdmi {
- compatible = "samsung,exynos5-hdmi";
- reg = <0x14530000 0x100000>;
- interrupts = <0 95 0>;
- hpd-gpio = <&gpx3 7 0xf 1 3>;
- };
+++ /dev/null
-Device-Tree bindings for hdmiddc driver
-
-Required properties:
-- compatible: value should be "samsung,exynos5-hdmiddc".
-- reg: I2C address of the hdmiddc device.
-
-Example:
-
- hdmiddc {
- compatible = "samsung,exynos5-hdmiddc";
- reg = <0x50>;
- };
+++ /dev/null
-Device-Tree bindings for hdmiphy driver
-
-Required properties:
-- compatible: value should be "samsung,exynos5-hdmiphy".
-- reg: I2C address of the hdmiphy device.
-
-Example:
-
- hdmiphy {
- compatible = "samsung,exynos5-hdmiphy";
- reg = <0x38>;
- };
+++ /dev/null
-Device-Tree bindings for mixer driver
-
-Required properties:
-- compatible: value should be "samsung,exynos5-mixer".
-- reg: physical base address of the mixer and length of memory mapped
- region.
-- interrupts: interrupt number to the cpu.
-
-Example:
-
- mixer {
- compatible = "samsung,exynos5-mixer";
- reg = <0x14450000 0x10000>;
- interrupts = <0 94 0>;
- };
- compatible: Should be "cdns,[<chip>-]{macb|gem}"
Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
- Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+ Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
--- /dev/null
+Device-Tree bindings for drm hdmi driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmi".
+- reg: physical base address of the hdmi and length of memory mapped
+ region.
+- interrupts: interrupt number to the cpu.
+- hpd-gpio: following information about the hotplug gpio pin.
+ a) phandle of the gpio controller node.
+ b) pin number within the gpio controller.
+ c) pin function mode.
+ d) optional flags and pull up/down.
+ e) drive strength.
+
+Example:
+
+ hdmi {
+ compatible = "samsung,exynos5-hdmi";
+ reg = <0x14530000 0x100000>;
+ interrupts = <0 95 0>;
+ hpd-gpio = <&gpx3 7 0xf 1 3>;
+ };
--- /dev/null
+Device-Tree bindings for hdmiddc driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmiddc".
+- reg: I2C address of the hdmiddc device.
+
+Example:
+
+ hdmiddc {
+ compatible = "samsung,exynos5-hdmiddc";
+ reg = <0x50>;
+ };
--- /dev/null
+Device-Tree bindings for hdmiphy driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmiphy".
+- reg: I2C address of the hdmiphy device.
+
+Example:
+
+ hdmiphy {
+ compatible = "samsung,exynos5-hdmiphy";
+ reg = <0x38>;
+ };
--- /dev/null
+Device-Tree bindings for mixer driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-mixer".
+- reg: physical base address of the mixer and length of memory mapped
+ region.
+- interrupts: interrupt number to the cpu.
+
+Example:
+
+ mixer {
+ compatible = "samsung,exynos5-mixer";
+ reg = <0x14450000 0x10000>;
+ interrupts = <0 94 0>;
+ };
--- /dev/null
+Simple Framebuffer
+
+A simple frame-buffer describes a raw memory region that may be rendered to,
+with the assumption that the display hardware has already been set up to scan
+out from that buffer.
+
+Required properties:
+- compatible: "simple-framebuffer"
+- reg: Should contain the location and size of the framebuffer memory.
+- width: The width of the framebuffer in pixels.
+- height: The height of the framebuffer in pixels.
+- stride: The number of bytes in each line of the framebuffer.
+- format: The format of the framebuffer surface. Valid values are:
+ - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+
+Example:
+
+ framebuffer {
+ compatible = "simple-framebuffer";
+ reg = <0x1d385000 (1600 * 1200 * 2)>;
+ width = <1600>;
+ height = <1200>;
+ stride = <(1600 * 2)>;
+ format = "r5g6b5";
+ };
};
The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob. The
-chosen node may also optionally contain an arbitrary number of
-additional properties for platform-specific configuration data.
+properties define the address and size of an initrd blob. Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource. The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.
During early boot, the architecture setup code calls of_scan_flat_dt()
several times with different helper callbacks to parse device tree
After a while you will start to get messages about the current status or errors,
as in the original code.
-Note that running a new test will stop any in progress test.
+Note that running a new test will not stop any in progress test.
The following command should return the actual state of the test.
% cat /sys/kernel/debug/dmatest/run
The module parameters that are supplied on the kernel command line will be used
for the first performed test. After the user gets control, the test could be
-interrupted or re-run with same or different parameters. For the details see
-the above section "Part 2 - When dmatest is built as a module..."
+re-run with the same or different parameters. For the details see the above
+section "Part 2 - When dmatest is built as a module..."
In both cases the module parameters are used as initial values for the test case.
You can always check them at run-time by running
removing extended attributes) the on-disk superblock feature
bit field will be updated to reflect this format being in use.
+ CRC enabled filesystems always use the attr2 format, and so
+ will reject the noattr2 mount option if it is set.
+
barrier
Enables the use of block layer write barriers for writes into
the journal and unwritten extent conversion. This allows for
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
+ tmem [KNL,XEN]
+ Enable the Transcendent memory driver if built-in.
+
+ tmem.cleancache=0|1 [KNL, XEN]
+ Default is on (1). Disable the usage of the cleancache
+ API to send clean page cache pages to the hypervisor.
+
+ tmem.frontswap=0|1 [KNL, XEN]
+ Default is on (1). Disable the usage of the frontswap
+ API to send swap pages to the hypervisor. If disabled,
+ selfballooning and selfshrinking are force-disabled.
+
+ tmem.selfballooning=0|1 [KNL, XEN]
+ Default is on (1). Disable the driving of swap pages
+ to the hypervisor.
+
+ tmem.selfshrinking=0|1 [KNL, XEN]
+ Default is on (1). Disable the partial swapoff mechanism
+ that immediately transfers pages from the Xen hypervisor
+ back to the kernel based on different criteria.
+
topology= [S390]
Format: {off | on}
Specify if the kernel should make use of the cpu
--- /dev/null
+REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+
+This document lists per-CPU kthreads in the Linux kernel and presents
+options to control their OS jitter. Note that non-per-CPU kthreads are
+not listed here. To reduce OS jitter from non-per-CPU kthreads, bind
+them to a "housekeeping" CPU dedicated to such work.
+
+
+REFERENCES
+
+o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
+
+o Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs.
+
+o man taskset: Using the taskset command to bind tasks to sets
+ of CPUs.
+
+o man sched_setaffinity: Using the sched_setaffinity() system
+ call to bind tasks to sets of CPUs.
+
+o /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state,
+ writing "0" to offline and "1" to online.
+
+o In order to locate kernel-generated OS jitter on CPU N:
+
+ cd /sys/kernel/debug/tracing
+ echo 1 > max_graph_depth # Increase the "1" for more detail
+ echo function_graph > current_tracer
+ # run workload
+ cat per_cpu/cpuN/trace
+
+
+KTHREADS
+
+Name: ehca_comp/%u
+Purpose: Periodically process Infiniband-related work.
+To reduce its OS jitter, do any of the following:
+1. Don't use eHCA Infiniband hardware, instead choosing hardware
+ that does not require per-CPU kthreads. This will prevent these
+ kthreads from being created in the first place. (This will
+ work for most people, as this hardware, though important, is
+ relatively old and is produced in relatively low unit volumes.)
+2. Do all eHCA-Infiniband-related work on other CPUs, including
+ interrupts.
+3. Rework the eHCA driver so that its per-CPU kthreads are
+ provisioned only on selected CPUs.
+
+
+Name: irq/%d-%s
+Purpose: Handle threaded interrupts.
+To reduce its OS jitter, do the following:
+1. Use irq affinity to force the irq threads to execute on
+ some other CPU.
+
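For example (the IRQ number and CPU mask below are illustrative only), the
interrupt, and with it its irq/%d-%s thread, can be kept off the de-jittered
CPU via procfs:

    # Restrict IRQ 44 and its handler thread to CPUs 0-2 (mask 0x7).
    echo 7 > /proc/irq/44/smp_affinity
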
+Name: kcmtpd_ctr_%d
+Purpose: Handle Bluetooth work.
+To reduce its OS jitter, do one of the following:
+1. Don't use Bluetooth, in which case these kthreads won't be
+ created in the first place.
+2. Use irq affinity to force Bluetooth-related interrupts to
+ occur on some other CPU and furthermore initiate all
+ Bluetooth activity on some other CPU.
+
+Name: ksoftirqd/%u
+Purpose: Execute softirq handlers when threaded or when under heavy load.
+To reduce its OS jitter, each softirq vector must be handled
+separately as follows:
+TIMER_SOFTIRQ: Do all of the following:
+1. To the extent possible, keep the CPU out of the kernel when it
+ is non-idle, for example, by avoiding system calls and by forcing
+ both kernel threads and interrupts to execute elsewhere.
+2. Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force
+ the CPU offline, then bring it back online. This forces
+ recurring timers to migrate elsewhere. If you are concerned
+ with multiple CPUs, force them all offline before bringing the
+ first one back online. Once you have onlined the CPUs in question,
+ do not offline any other CPUs, because doing so could force the
+ timer back onto one of the CPUs in question.
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following:
+1. Force networking interrupts onto other CPUs.
+2. Initiate any network I/O on other CPUs.
+3. Once your application has started, prevent CPU-hotplug operations
+ from being initiated from tasks that might run on the CPU to
+ be de-jittered. (It is OK to force this CPU offline and then
+ bring it back online before you start your application.)
+BLOCK_SOFTIRQ: Do all of the following:
+1. Force block-device interrupts onto some other CPU.
+2. Initiate any block I/O on other CPUs.
+3. Once your application has started, prevent CPU-hotplug operations
+ from being initiated from tasks that might run on the CPU to
+ be de-jittered. (It is OK to force this CPU offline and then
+ bring it back online before you start your application.)
+BLOCK_IOPOLL_SOFTIRQ: Do all of the following:
+1. Force block-device interrupts onto some other CPU.
+2. Initiate any block I/O and block-I/O polling on other CPUs.
+3. Once your application has started, prevent CPU-hotplug operations
+ from being initiated from tasks that might run on the CPU to
+ be de-jittered. (It is OK to force this CPU offline and then
+ bring it back online before you start your application.)
+TASKLET_SOFTIRQ: Do one or more of the following:
+1. Avoid use of drivers that use tasklets. (Such drivers will contain
+ calls to things like tasklet_schedule().)
+2. Convert all drivers that you must use from tasklets to workqueues.
+3. Force interrupts for drivers using tasklets onto other CPUs,
+ and also do I/O involving these drivers on other CPUs.
+SCHED_SOFTIRQ: Do all of the following:
+1. Avoid sending scheduler IPIs to the CPU to be de-jittered,
+ for example, ensure that at most one runnable kthread is present
+ on that CPU. If a thread that expects to run on the de-jittered
+ CPU awakens, the scheduler will send an IPI that can result in
+ a subsequent SCHED_SOFTIRQ.
+2. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+ CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+ to be de-jittered is marked as an adaptive-ticks CPU using the
+ "nohz_full=" boot parameter. This reduces the number of
+ scheduler-clock interrupts that the de-jittered CPU receives,
+ minimizing its chances of being selected to do the load balancing
+ work that runs in SCHED_SOFTIRQ context.
+3. To the extent possible, keep the CPU out of the kernel when it
+ is non-idle, for example, by avoiding system calls and by
+ forcing both kernel threads and interrupts to execute elsewhere.
+ This further reduces the number of scheduler-clock interrupts
+ received by the de-jittered CPU.
+HRTIMER_SOFTIRQ: Do all of the following:
+1. To the extent possible, keep the CPU out of the kernel when it
+ is non-idle. For example, avoid system calls and force both
+ kernel threads and interrupts to execute elsewhere.
+2. Build with CONFIG_HOTPLUG_CPU=y. Once boot completes, force the
+ CPU offline, then bring it back online. This forces recurring
+ timers to migrate elsewhere. If you are concerned with multiple
+ CPUs, force them all offline before bringing the first one
+ back online. Once you have onlined the CPUs in question, do not
+ offline any other CPUs, because doing so could force the timer
+ back onto one of the CPUs in question.
+RCU_SOFTIRQ: Do at least one of the following:
+1. Offload callbacks and keep the CPU in either dyntick-idle or
+ adaptive-ticks state by doing all of the following:
+ a. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+ CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU
+ to be de-jittered is marked as an adaptive-ticks CPU using
+ the "nohz_full=" boot parameter. Bind the rcuo kthreads
+ to housekeeping CPUs, which can tolerate OS jitter.
+ b. To the extent possible, keep the CPU out of the kernel
+ when it is non-idle, for example, by avoiding system
+ calls and by forcing both kernel threads and interrupts
+ to execute elsewhere.
+2. Enable RCU to do its processing remotely via dyntick-idle by
+ doing all of the following:
+ a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
+ b. Ensure that the CPU goes idle frequently, allowing other
+ CPUs to detect that it has passed through an RCU quiescent
+ state. If the kernel is built with CONFIG_NO_HZ_FULL=y,
+ userspace execution also allows other CPUs to detect that
+ the CPU in question has passed through a quiescent state.
+ c. To the extent possible, keep the CPU out of the kernel
+ when it is non-idle, for example, by avoiding system
+ calls and by forcing both kernel threads and interrupts
+ to execute elsewhere.
+
+Name: rcuc/%u
+Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+To reduce its OS jitter, do at least one of the following:
+1. Build the kernel with CONFIG_PREEMPT=n. This prevents these
+ kthreads from being created in the first place, and also obviates
+ the need for RCU priority boosting. This approach is feasible
+ for workloads that do not require high degrees of responsiveness.
+2. Build the kernel with CONFIG_RCU_BOOST=n. This prevents these
+ kthreads from being created in the first place. This approach
+ is feasible only if your workload never requires RCU priority
+ boosting, for example, if you ensure frequent idle time on all
+ CPUs that might execute within the kernel.
+3. Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
+ which offloads all RCU callbacks to kthreads that can be moved
+ off of CPUs susceptible to OS jitter. This approach prevents the
+ rcuc/%u kthreads from having any work to do, so that they are
+ never awakened.
+4. Ensure that the CPU never enters the kernel, and, in particular,
+ avoid initiating any CPU hotplug operations on this CPU. This is
+ another way of preventing any callbacks from being queued on the
+ CPU, again preventing the rcuc/%u kthreads from having any work
+ to do.
+
+Name: rcuob/%d, rcuop/%d, and rcuos/%d
+Purpose: Offload RCU callbacks from the corresponding CPU.
+To reduce its OS jitter, do at least one of the following:
+1. Use affinity, cgroups, or other mechanism to force these kthreads
+ to execute on some other CPU.
+2. Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
+ kthreads from being created in the first place. However, please
+ note that this will not eliminate OS jitter, but will instead
+ shift it to RCU_SOFTIRQ.
+
+Name: watchdog/%u
+Purpose: Detect software lockups on each CPU.
+To reduce its OS jitter, do at least one of the following:
+1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
+ kthreads from being created in the first place.
+2. Echo a zero to /proc/sys/kernel/watchdog to disable the
+ watchdog timer.
+3. Echo a large number to /proc/sys/kernel/watchdog_thresh in
+ order to reduce the frequency of OS jitter due to the watchdog
+ timer down to a level that is acceptable for your workload.
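
As a sketch of options 2 and 3 above (the threshold value is an arbitrary
example):

    # Disable the lockup detector entirely:
    echo 0 > /proc/sys/kernel/watchdog
    # ...or keep it, but raise the threshold (in seconds) to reduce jitter:
    echo 60 > /proc/sys/kernel/watchdog_thresh
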
System Power Management Phases
------------------------------
Suspending or resuming the system is done in several phases. Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
hibernation state ("suspend-to-disk"). Each phase involves executing callbacks
for every device before the next phase begins. Not all busses or classes
support all these callbacks and not all drivers use all the callbacks. The
Entering System Suspend
-----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:
prepare, suspend, suspend_late, suspend_noirq.
Leaving System Suspend
----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:
resume_noirq, resume_early, resume, complete.
Entering Hibernation
--------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
Therefore there are more phases for hibernation, with a different set of
callbacks. These phases always run after tasks have been frozen and memory has
been freed.
At this point the system image is saved, and the devices then need to be
prepared for the upcoming system shutdown. This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.
9. The prepare phase is discussed above.
is mounted at /sys).
/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
(Suspend-to-Disk).
Writing one of those strings to this file causes the system to
The subsystems or drivers having such needs can register suspend notifiers that
will be called upon the following events by the PM core:
-PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will
- be frozen immediately.
+PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen
+ immediately. This is different from PM_SUSPEND_PREPARE
+ below because here we do additional work between notifiers
+ and drivers freezing.
PM_POST_HIBERNATION The system memory state has been restored from a
hibernation image or an error occurred during
System Power Management States
-The kernel supports three power management states generically, though
-each is dependent on platform support code to implement the low-level
-details for each state. This file describes each state, what they are
+The kernel supports four power management states; one of them is purely
+generic and the other three depend on platform support code to implement
+the low-level details for each state.
+This file describes each state, what they are
commonly called, what ACPI state they map to, and what string to write
to /sys/power/state to enter that state
+state: Freeze / Low-Power Idle
+ACPI state: S0
+String: "freeze"
+
+This state is a generic, pure software, light-weight, low-power state.
+It allows more energy to be saved relative to idle by freezing user
+space and putting all I/O devices into low-power states (possibly
+lower-power than available at run time), such that the processors can
+spend more time in their idle states.
+This state can be used for platforms without Standby/Suspend-to-RAM
+support, or it can be used in addition to Suspend-to-RAM (memory sleep)
+to provide reduced resume latency.
+
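For example, on a kernel exposing this state the transition can be requested
by writing the string above to /sys/power/state:

    # Enter the freeze state; the system resumes on the next wakeup event.
    echo freeze > /sys/power/state
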
State: Standby / Power-On Suspend
ACPI State: S1
also offers low power savings, but low resume latency. Not all devices
support D1, and those that don't are left on.
-A transition from Standby to the On state should take about 1-2
-seconds.
-
State: Suspend-to-RAM
ACPI State: S3
For at least ACPI, STR requires some minimal boot-strapping code to
resume the system from STR. This may be true on other platforms.
-A transition from Suspend-to-RAM to the On state should take about
-3-5 seconds.
-
State: Suspend-to-disk
ACPI State: S4
down offers greater savings, and allows this mechanism to work on any
system. However, entering a real low-power state allows the user to
trigger wake up events (e.g. pressing a key or opening a laptop lid).
-
-A transition from Suspend-to-Disk to the On state should take about 30
-seconds, though it's typically a bit more with the current
-implementation.
fix_the_problem(ucp->dar);
}
+When in an active transaction that takes a signal, we need to be careful with
+the stack. It's possible that the stack has moved back up after the tbegin.
+The obvious case here is when the tbegin is called inside a function that
+returns before a tend. In this case, the stack is part of the checkpointed
+transactional memory state. If we write over this non-transactionally or in
+suspend, we are in trouble because if we get a tm abort, the program counter and
+stack pointer will be back at the tbegin but our in-memory stack won't be valid
+anymore.
+
+To avoid this, when taking a signal in an active transaction, we need to use
+the stack pointer from the checkpointed state, rather than the speculated
+state. This ensures that the signal context (written tm suspended) will be
+written below the stack required for the rollback. The transaction is aborted
+because of the treclaim, so any memory written between the tbegin and the
+signal will be rolled back anyway.
+
+For signals taken in non-TM or suspended mode, we use the
+normal/non-checkpointed stack pointer.
+
Failure cause codes used by kernel
==================================
kernel aborted a transaction:
TM_CAUSE_RESCHED Thread was rescheduled.
+ TM_CAUSE_TLBI Software TLB invalidate.
TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
transactions for consistency will use this.
TM_CAUSE_SIGNAL Signal delivered.
TM_CAUSE_MISC Currently unused.
+ TM_CAUSE_ALIGNMENT Alignment fault.
+ TM_CAUSE_EMULATE Emulation that touched memory.
-These can be checked by the user program's abort handler as TEXASR[0:7].
-
+These can be checked by the user program's abort handler as TEXASR[0:7]. If
+bit 7 is set, it indicates that the error is considered persistent. For example,
+a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.
GDB
===
In order to initialize the RapidIO subsystem, a platform must initialize and
register at least one master port within the RapidIO network. To register an mport
within the subsystem, the controller driver's initialization code calls
-rio_register_mport() for each available master port. After all active master
-ports are registered with a RapidIO subsystem, the rio_init_mports() routine
-is called to perform enumeration and discovery.
+rio_register_mport() for each available master port.
-In the current PowerPC-based implementation a subsys_initcall() is specified to
-perform controller initialization and mport registration. At the end it directly
-calls rio_init_mports() to execute RapidIO enumeration and discovery.
+RapidIO subsystem uses subsys_initcall() or device_initcall() to perform
+controller initialization (depending on controller device type).
+
+After all active master ports are registered with a RapidIO subsystem,
+an enumeration and/or discovery routine may be called automatically or
+by user-space command.
4. Enumeration and Discovery
----------------------------
-When rio_init_mports() is called it scans a list of registered master ports and
-calls an enumeration or discovery routine depending on the configured role of a
-master port: host or agent.
+4.1 Overview
+------------
+
+RapidIO subsystem configuration options allow users to specify enumeration and
+discovery methods as statically linked components or loadable modules.
+An enumeration/discovery method implementation and available input parameters
+define how any given method can be attached to available RapidIO mports:
+simply to all available mports OR individually to the specified mport device.
+
+Depending on selected enumeration/discovery build configuration, there are
+several methods to initiate an enumeration and/or discovery process:
+
+ (a) Statically linked enumeration and discovery process can be started
+ automatically during kernel initialization time using corresponding module
+ parameters. This was the original method used since the introduction of the
+ RapidIO subsystem. This method now relies on an enumerator module parameter,
+ which is 'rio-scan.scan' for the existing basic enumeration/discovery method.
+ When automatic start of enumeration/discovery is used, a user has to ensure
+ that all discovering endpoints are started before the enumerating endpoint
+ and are waiting for enumeration to be completed.
+ The configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines the time that a
+ discovering endpoint waits for enumeration to be completed. If the specified
+ timeout expires, the discovery process is terminated without obtaining RapidIO
+ network information. NOTE: a timed-out discovery process may be restarted
+ later using a user-space command, as described below, if the given endpoint
+ was enumerated successfully.
+
+ (b) Statically linked enumeration and discovery process can be started by
+ a command from user space. This initiation method provides more flexibility
+ for system startup compared to option (a) above. After all participating
+ endpoints have been successfully booted, an enumeration process shall be
+ started first by issuing a user-space command; after the enumeration is
+ completed, a discovery process can be started on all remaining endpoints.
+
+ (c) Modular enumeration and discovery process can be started by a command from
+ user space. After an enumeration/discovery module is loaded, a network scan
+ process can be started by issuing a user-space command.
+ As with option (b) above, an enumerator has to be started first.
+
+ (d) Modular enumeration and discovery process can be started by a module
+ initialization routine. In this case an enumerating module shall be loaded
+ first.
+
+When a network scan process is started it calls an enumeration or discovery
+routine depending on the configured role of a master port: host or agent.
Enumeration is performed by a master port if it is configured as a host port by
assigning a host device ID greater than or equal to zero. A host device ID is
The enumeration and discovery routines use RapidIO maintenance transactions
to access the configuration space of devices.
-The enumeration process is implemented according to the enumeration algorithm
-outlined in the RapidIO Interconnect Specification: Annex I [1].
+4.2 Automatic Start of Enumeration and Discovery
+------------------------------------------------
+
+The automatic enumeration/discovery start method is applicable only to the
+built-in enumeration/discovery RapidIO configuration. To enable automatic
+enumeration/discovery start by the existing basic enumerator method, use the
+boot command line parameter "rio-scan.scan=1".
+
+This configuration requires synchronized start of all RapidIO endpoints that
+form a network which will be enumerated/discovered. Discovering endpoints have
+to be started before an enumeration starts to ensure that all RapidIO
+controllers have been initialized and are ready to be discovered. Configuration
+parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines the time (in seconds) for which
+a discovering endpoint will wait for enumeration to be completed.
+
+When automatic enumeration/discovery start is selected, basic method's
+initialization routine calls rio_init_mports() to perform enumeration or
+discovery for all known mport devices.
+
+Depending on RapidIO network size and configuration this automatic
+enumeration/discovery start method may be difficult to use due to the
+requirement for synchronized start of all endpoints.
+
+4.3 User-space Start of Enumeration and Discovery
+-------------------------------------------------
+
+User-space start of enumeration and discovery can be used with built-in and
+modular build configurations. For user-space controlled start, the RapidIO
+subsystem creates the write-only sysfs attribute file '/sys/bus/rapidio/scan'.
+To initiate an enumeration or discovery process on a specific mport device, a
+user needs to write its mport_ID (not its RapidIO destination ID) into that
+file. The mport_ID is a sequential number (0 ... RIO_MAX_MPORTS) assigned
+during mport device registration. For example, for a machine with a single
+RapidIO controller, mport_ID for that controller will always be 0.
+
+To initiate RapidIO enumeration/discovery on all available mports a user may
+write '-1' (or RIO_MPORT_ANY) into the scan attribute file.
+
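For example (using mport_ID 0 as described above):

    # Start enumeration/discovery on mport 0 only:
    echo 0 > /sys/bus/rapidio/scan
    # ...or on all registered mports:
    echo -1 > /sys/bus/rapidio/scan
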
+4.4 Basic Enumeration Method
+----------------------------
+
+This is the original enumeration/discovery method, which has been available
+since the first release of the RapidIO subsystem code. The enumeration process is
+implemented according to the enumeration algorithm outlined in the RapidIO
+Interconnect Specification: Annex I [1].
+
+This method can be configured as statically linked or as a loadable module.
+The method's single parameter "scan" allows triggering the enumeration/discovery
+process from the module initialization routine.
+
+This enumeration/discovery method can be started only once and does not support
+unloading if it is built as a module.
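
As a hypothetical illustration of the modular case, built from the module and
parameter names given above, loading the enumerator and triggering a scan from
its initialization routine could look like:

    modprobe rio-scan scan=1
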
The enumeration process traverses the network using a recursive depth-first
algorithm. When a new device is found, the enumerator takes ownership of that
an agent skips RapidIO discovery and continues with remaining kernel
initialization.
+4.5 Adding New Enumeration/Discovery Method
+-------------------------------------------
+
+RapidIO subsystem code organization allows addition of new enumeration/discovery
+methods as new configuration options without significant impact on the core
+RapidIO code.
+
+A new enumeration/discovery method has to be attached to one or more mport
+devices before an enumeration/discovery process can be started. Normally,
+the method's module initialization routine calls rio_register_scan() to attach
+an enumerator to a specified mport device (or devices). The basic enumerator
+implementation demonstrates this process.
+
5. References
-------------
IDT_GEN2:
errlog - reads contents of device error log until it is empty.
+
+
+5. RapidIO Bus Attributes
+-------------------------
+
+RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific
+attribute:
+
+ scan - allows triggering an enumeration or discovery process from user space.
+ This is a write-only attribute. To initiate an enumeration or discovery
+ process on specific mport device, a user needs to write mport_ID (not
+ RapidIO destination ID) into this file. The mport_ID is a sequential
+ number (0 ... RIO_MAX_MPORTS) assigned to the mport device.
+ For example, for a machine with a single RapidIO controller, mport_ID
+ for that controller always will be 0.
+ To initiate RapidIO enumeration/discovery on all available mports
+ a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
ECRYPT FILE SYSTEM
M: Tyler Hicks <tyhicks@canonical.com>
-M: Dustin Kirkland <dustin.kirkland@gazzang.com>
L: ecryptfs@vger.kernel.org
+W: http://ecryptfs.org
W: https://launchpad.net/ecryptfs
S: Supported
F: Documentation/filesystems/ecryptfs.txt
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-fbdev@vger.kernel.org
W: http://linux-fbdev.sourceforge.net/
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
-T: git git://github.com/schandinat/linux-2.6.git fbdev-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
S: Maintained
F: Documentation/fb/
F: Documentation/devicetree/bindings/fb/
M: Haiyang Zhang <haiyangz@microsoft.com>
L: devel@linuxdriverproject.org
S: Maintained
-F: drivers/hv/
+F: arch/x86/include/asm/mshyperv.h
+F: arch/x86/include/uapi/asm/hyperv.h
+F: arch/x86/kernel/cpu/mshyperv.c
F: drivers/hid/hid-hyperv.c
+F: drivers/hv/
F: drivers/net/hyperv/
+F: drivers/scsi/storvsc_drv.c
+F: drivers/video/hyperv_fb.c
+F: include/linux/hyperv.h
+F: tools/hv/
I2C OVER PARALLEL PORT
M: Jean Delvare <khali@linux-fr.org>
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*
+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+M: Or Gerlitz <ogerlitz@mellanox.com>
+M: Roi Dayan <roid@mellanox.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: http://www.openfabrics.org
+W: www.open-iscsi.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/ulp/iser
+
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
F: include/uapi/linux/sunrpc/
KERNEL VIRTUAL MACHINE (KVM)
-M: Marcelo Tosatti <mtosatti@redhat.com>
M: Gleb Natapov <gleb@redhat.com>
+M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
-W: http://kvm.qumranet.com
+W: http://linux-kvm.org
S: Supported
-F: Documentation/*/kvm.txt
+F: Documentation/*/kvm*.txt
+F: Documentation/virtual/kvm/
F: arch/*/kvm/
F: arch/*/include/asm/kvm*
F: include/linux/kvm*
F: Documentation/hwmon/lm90
F: drivers/hwmon/lm90.c
+LM95234 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/lm95234
+F: drivers/hwmon/lm95234.c
+
LME2510 MEDIA DRIVER
M: Malcolm Priestley <tvboxspy@gmail.com>
L: linux-media@vger.kernel.org
F: Documentation/networking/vxge.txt
F: drivers/net/ethernet/neterion/
-NETFILTER/IPTABLES/IPCHAINS
-P: Harald Welte
-P: Jozsef Kadlecsik
+NETFILTER/IPTABLES
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Patrick McHardy <kaber@trash.net>
+M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
L: netfilter-devel@vger.kernel.org
L: netfilter@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
-T: git git://1984.lsi.us.es/nf
-T: git git://1984.lsi.us.es/nf-next
+Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
S: Supported
F: include/linux/netfilter*
F: include/linux/netfilter/
W: http://www.parisc-linux.org/
Q: http://patchwork.kernel.org/project/linux-parisc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
S: Maintained
F: arch/parisc/
+F: Documentation/parisc/
F: drivers/parisc/
+F: drivers/char/agp/parisc-agp.c
+F: drivers/input/serio/gscps2.c
+F: drivers/parport/parport_gsc.*
+F: drivers/tty/serial/8250/8250_gsc.c
+F: drivers/video/sti*
+F: drivers/video/console/sti*
+F: drivers/video/logo/logo_parisc*
PC87360 HARDWARE MONITORING DRIVER
M: Jim Cromie <jim.cromie@gmail.com>
L: target-devel@vger.kernel.org
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
S: Supported
F: drivers/target/
F: include/target/
F: include/linux/mmc/tmio.h
F: include/linux/mmc/sh_mobile_sdhi.h
+TMP401 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/tmp401
+F: drivers/hwmon/tmp401.c
+
TMPFS (SHMEM FILESYSTEM)
M: Hugh Dickins <hughd@google.com>
L: linux-mm@kvack.org
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*
config GENERIC_SMP_IDLE_THREAD
bool
+config GENERIC_IDLE_POLL_SETUP
+ bool
+
# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
bool
soc100 {
uart@FF100000 {
- pinctrl-names = "abilis,simple-default";
+ pinctrl-names = "default";
pinctrl-0 = <&pctl_uart0>;
};
ethernet@FE100000 {
soc100 {
uart@FF100000 {
- pinctrl-names = "abilis,simple-default";
+ pinctrl-names = "default";
pinctrl-0 = <&pctl_uart0>;
};
ethernet@FE100000 {
};
uart@FF100000 {
- compatible = "snps,dw-apb-uart",
- "abilis,simple-pinctrl";
+ compatible = "snps,dw-apb-uart";
reg = <0xFF100000 0x100>;
clock-frequency = <166666666>;
interrupts = <25 1>;
#address-cells = <1>;
#size-cells = <0>;
cell-index = <1>;
- compatible = "abilis,tb100-spi",
- "abilis,simple-pinctrl";
+ compatible = "abilis,tb100-spi";
num-cs = <2>;
reg = <0xFE011000 0x20>;
interrupt-parent = <&tb10x_ictl>;
#endif
}
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
/*
* checks if two addresses (after page aligning) index into same cache set
*/
#define addr_not_cache_congruent(addr1, addr2) \
+({ \
cache_is_vipt_aliasing() ? \
- (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \
+ (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
+})
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-#define clear_user_page(addr, vaddr, pg) clear_page(addr)
-#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
-
-#else /* VIPT aliasing dcache */
-
struct vm_area_struct;
struct page;
unsigned long u_vaddr, struct vm_area_struct *vma);
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
-#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
-
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
-#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
-#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
-#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
/* PD1 */
#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
-#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
-#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
-#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
#endif
/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+ _PAGE_GLOBAL | _PAGE_PRESENT)
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
* -by default cached, unless config otherwise
* -present in memory
*/
-#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+
+#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ)
+#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE)
+#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
* kernel vaddr space - visible in all addr spaces, but kernel mode only
* Thus Global, all-kernel-access, no-user-access, cached
*/
-#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
/* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
- _PAGE_GLOBAL)
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
- _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
#ifndef __ASSEMBLY__
local_irq_save(flags);
__ic_line_inv_vaddr(paddr, vaddr, len);
- __dc_line_op(paddr, vaddr, len, OP_FLUSH);
+ __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
local_irq_restore(flags);
}
flush_cache_all();
}
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long u_vaddr)
+{
+ /* TBD: do we really need to clear the kernel mapping */
+ __flush_dcache_page(page_address(page), u_vaddr);
+ __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
set_bit(PG_arch_1, &page->flags);
}
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long u_vaddr)
-{
- /* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page(page_address(page), u_vaddr);
- __flush_dcache_page(page_address(page), page_address(page));
-
-}
-
-#endif
/**********************************************************************
* Explicit Cache flush request from user space via syscall
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
*/
- if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
+ if ((vma->vm_flags & VM_EXEC) ||
+ addr_not_cache_congruent(paddr, vaddr)) {
struct page *page = pfn_to_page(pte_pfn(*ptep));
int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
;----------------------------------------------------------------
; VERIFY_PTE: Check if PTE permissions approp for executing code
cmp_s r2, VMALLOC_START
- mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
and r3, r0, r2 ; Mask out NON Flag bits from PTE
mov_s r2, 0
lr r3, [ecr]
btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
- or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
- or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE
; Above laddering takes care of XCHG access
; which is both Read and Write
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static void __init tb10x_platform_late_init(void)
-{
- struct device_node *dn;
-
- /*
- * Pinctrl documentation recommends setting up the iomux here for
- * all modules which don't require control over the pins themselves.
- * Modules which need this kind of assistance are compatible with
- * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
- * TODO: Does this recommended method work cleanly with pins required
- * by modules?
- */
- for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
- struct platform_device *pd = of_find_device_by_node(dn);
- struct pinctrl *pctl;
-
- pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
- if (IS_ERR(pctl)) {
- int ret = PTR_ERR(pctl);
- dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
- ret);
- }
- }
-}
-
static const char *tb10x_compat[] __initdata = {
"abilis,arc-tb10x",
NULL,
MACHINE_START(TB10x, "tb10x")
.dt_compat = tb10x_compat,
.init_machine = tb10x_platform_init,
- .init_late = tb10x_platform_late_init,
MACHINE_END
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
config ARCH_DOVE
bool "Marvell Dove"
select ARCH_REQUIRE_GPIOLIB
- select CPU_V7
+ select CPU_PJ4
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select PINCTRL
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
echo ' uImage - U-Boot wrapped zImage'
- echo ' bootpImage - Combined zImage and initial RAM disk'
+ echo ' bootpImage - Combined zImage and initial RAM disk'
echo ' (supply initrd image via make variable INITRD=<path>)'
echo '* dtbs - Build device tree blobs for enabled boards'
echo ' install - Install uncompressed kernel'
spear320-evb.dtb \
spear320-hmi.dtb
dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+dtb-$(CONFIG_ARCH_SUNXI) += \
+ sun4i-a10-cubieboard.dtb \
+ sun4i-a10-mini-xplus.dtb \
sun4i-a10-hackberry.dtb \
sun5i-a13-olinuxino.dtb
dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
#size-cells = <1>;
compatible = "simple-bus";
interrupt-parent = <&mpic>;
- ranges = <0 0 0xd0000000 0x100000>;
+ ranges = <0 0 0xd0000000 0x0100000 /* internal registers */
+ 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;
internal-regs {
compatible = "simple-bus";
};
soc {
- ranges = <0 0xd0000000 0x100000>;
+ ranges = <0 0xd0000000 0x0100000 /* internal registers */
+ 0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
internal-regs {
system-controller@18200 {
compatible = "marvell,armada-370-xp-system-controller";
L2: l2-cache {
compatible = "marvell,aurora-outer-cache";
- reg = <0xd0008000 0x1000>;
+ reg = <0x08000 0x1000>;
cache-id-part = <0x100>;
wt-override;
};
- mpic: interrupt-controller@20000 {
+ interrupt-controller@20000 {
reg = <0x20a00 0x1d0>, <0x21870 0x58>;
};
};
soc {
+ ranges = <0 0 0xd0000000 0x100000
+ 0xf0000000 0 0xf0000000 0x1000000>;
+
internal-regs {
serial@12000 {
clock-frequency = <250000000>;
};
soc {
+ ranges = <0 0 0xd0000000 0x100000
+ 0xf0000000 0 0xf0000000 0x8000000>;
+
internal-regs {
serial@12000 {
clock-frequency = <250000000>;
wt-override;
};
- mpic: interrupt-controller@20000 {
+ interrupt-controller@20000 {
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
};
atmel,pins =
<0 10 0x2 0x0 /* PA10 periph B */
0 11 0x2 0x0 /* PA11 periph B */
- 0 24 0x2 0x0 /* PA24 periph B */
+ 0 22 0x2 0x0 /* PA22 periph B */
0 25 0x2 0x0 /* PA25 periph B */
0 26 0x2 0x0 /* PA26 periph B */
0 27 0x2 0x0 /* PA27 periph B */
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
+ atmel,external-irqs = <31>;
};
ramc0: ramc@ffffe800 {
/include/ "at91sam9x5ek.dtsi"
/ {
- model = "Atmel AT91SAM9G25-EK";
+ model = "Atmel AT91SAM9X25-EK";
compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
ahb {
reg = <0x7e201000 0x1000>;
interrupts = <2 25>;
clock-frequency = <3000000>;
+ arm,primecell-periphid = <0x00241011>;
};
gpio: gpio {
clock-names = "usbhost";
};
+ usbphy@12130000 {
+ compatible = "samsung,exynos5250-usb2phy";
+ reg = <0x12130000 0x100>;
+ clocks = <&clock 1>, <&clock 285>;
+ clock-names = "ext_xtal", "usbhost";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usbphy-sys {
+ reg = <0x10040704 0x8>,
+ <0x10050230 0x4>;
+ };
+ };
+
amba {
#address-cells = <1>;
#size-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>;
- clock-names = "ipg";
+ clocks = <&clks 62>, <&clks 62>;
+ clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
};
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x50004000 0x4000>;
interrupts = <0>;
- clocks = <&clks 80>;
- clock-names = "ipg";
+ clocks = <&clks 80>, <&clks 80>;
+ clock-names = "ipg", "per";
status = "disabled";
};
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x50010000 0x4000>;
- clocks = <&clks 79>;
- clock-names = "ipg";
+ clocks = <&clks 79>, <&clks 79>;
+ clock-names = "ipg", "per";
interrupts = <13>;
status = "disabled";
};
compatible = "fsl,imx27-cspi";
reg = <0x1000e000 0x1000>;
interrupts = <16>;
- clocks = <&clks 53>, <&clks 0>;
+ clocks = <&clks 53>, <&clks 53>;
clock-names = "ipg", "per";
status = "disabled";
};
compatible = "fsl,imx27-cspi";
reg = <0x1000f000 0x1000>;
interrupts = <15>;
- clocks = <&clks 52>, <&clks 0>;
+ clocks = <&clks 52>, <&clks 52>;
clock-names = "ipg", "per";
status = "disabled";
};
compatible = "fsl,imx27-cspi";
reg = <0x10017000 0x1000>;
interrupts = <6>;
- clocks = <&clks 51>, <&clks 0>;
+ clocks = <&clks 51>, <&clks 51>;
clock-names = "ipg", "per";
status = "disabled";
};
compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
reg = <0x83fc0000 0x4000>;
interrupts = <38>;
- clocks = <&clks 55>, <&clks 0>;
+ clocks = <&clks 55>, <&clks 55>;
clock-names = "ipg", "per";
status = "disabled";
};
compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
reg = <0x63fc0000 0x4000>;
interrupts = <38>;
- clocks = <&clks 55>, <&clks 0>;
+ clocks = <&clks 55>, <&clks 55>;
clock-names = "ipg", "per";
status = "disabled";
};
usb_otg_hs: usb_otg_hs@480ab000 {
compatible = "ti,omap3-musb";
reg = <0x480ab000 0x1000>;
- interrupts = <0 92 0x4>, <0 93 0x4>;
+ interrupts = <92>, <93>;
interrupt-names = "mc", "dma";
ti,hwmods = "usb_otg_hs";
multipoint = <1>;
compatible = "atmel,at91sam9x5-spi";
reg = <0xf0004000 0x100>;
interrupts = <24 4 3>;
- cs-gpios = <&pioD 13 0
- &pioD 14 0 /* conflicts with SCK0 and CANRX0 */
- &pioD 15 0 /* conflicts with CTS0 and CANTX0 */
- &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */
- >;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
status = "disabled";
};
macb0: ethernet@f0028000 {
- compatible = "cnds,pc302-gem", "cdns,gem";
+ compatible = "cdns,pc302-gem", "cdns,gem";
reg = <0xf0028000 0x100>;
interrupts = <34 4 3>;
pinctrl-names = "default";
compatible = "atmel,at91sam9x5-spi";
reg = <0xf8008000 0x100>;
interrupts = <25 4 3>;
- cs-gpios = <&pioC 25 0
- &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */
- &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */
- &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */
- >;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
status = "disabled";
ahb {
apb {
+ spi0: spi@f0004000 {
+ cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
+ };
+
macb0: ethernet@f0028000 {
phy-mode = "rgmii";
};
bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
};
+ /* This is where the interrupt is routed on the S8815 board */
+ external-bus@34000000 {
+ ethernet@300 {
+ interrupt-parent = <&gpio3>;
+ interrupts = <8 0x1>;
+ };
+ };
+
/* Custom board node with GPIO pins to active etc */
usb-s8815 {
/* The S8815 is using this very GPIO pin for the SMSC91x IRQs */
ethernet-gpio {
- gpios = <&gpio3 19 0x1>;
- interrupts = <19 0x1>;
- interrupt-parent = <&gpio3>;
+ gpios = <&gpio3 8 0x1>;
};
/* This will bias the MMC/SD card detect line */
mmcsd-gpio {
bootargs = "earlyprintk console=ttyS0,115200";
};
- soc {
- uart0: uart@01c28000 {
+ soc@01c20000 {
+ uart0: serial@01c28000 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
status = "okay";
#include <linux/smp.h>
#include <linux/spinlock.h>
-#include <linux/irqchip/arm-gic.h>
-
#include <asm/mcpm.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
static void __cpuinit mcpm_secondary_init(unsigned int cpu)
{
mcpm_cpu_powered_up();
- gic_secondary_init(0);
}
#ifdef CONFIG_HOTPLUG_CPU
-CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_ARCH_EXYNOS=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_S3C_LOWLEVEL_UART_PORT=3
CONFIG_S3C24XX_PWM=y
CONFIG_ARCH_EXYNOS5=y
CONFIG_MACH_EXYNOS4_DT=y
-CONFIG_MACH_EXYNOS5_DT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
CONFIG_INET=y
CONFIG_RFKILL_REGULATOR=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
CONFIG_NETDEVICES=y
CONFIG_SMSC911X=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
+CONFIG_KEYBOARD_CROS_EC=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_CYAPA=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
+CONFIG_TCG_TIS_I2C_INFINEON=y
CONFIG_I2C=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_S3C2410=y
+CONFIG_DEBUG_GPIO=y
# CONFIG_HWMON is not set
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_MAX77686=y
+CONFIG_MFD_MAX8997=y
+CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_TPS65090=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX8997=y
+CONFIG_REGULATOR_MAX77686=y
+CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_SIMPLE=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
CONFIG_EXYNOS_DP=y
CONFIG_FONT_7x14=y
CONFIG_LOGO=y
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_S5P=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_PHY=y
+CONFIG_SAMSUNG_USB2PHY=y
+CONFIG_SAMSUNG_USB3PHY=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_S3C=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_COMMON_CLK_MAX77686=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRC_CCITT=y
CONFIG_USB_DEBUG=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V6=y
CONFIG_ARCH_OMAP2PLUS=y
CONFIG_OMAP_RESET_CLOCKS=y
CONFIG_OMAP_MUX_DEBUG=y
CONFIG_USB_DEBUG=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
+CONFIG_TEGRA_HOST1X=y
CONFIG_DRM_TEGRA=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=y
CONFIG_SENSORS_ISL29028=y
-CONFIG_SENSORS_AK8975=y
+CONFIG_AK8975=y
CONFIG_MFD_NVEC=y
CONFIG_KEYBOARD_NVEC=y
CONFIG_SERIO_NVEC_PS2=y
add r3,r3,r10 @ E+=F_00_19(B,C,D)
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
+ sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
- sub sp,sp,#25*4
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
atomic64_t, \
counter), \
- (unsigned long)(o), \
- (unsigned long)(n)))
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
local64_t, \
a), \
- (unsigned long)(o), \
- (unsigned long)(n)))
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
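
The widened casts matter because on 32-bit ARM an unsigned long is only 32 bits, so funnelling the old/new values of a 64-bit cmpxchg through it silently drops the upper word. A minimal userspace sketch of the failure mode (illustrative only; the helper name and the GCC __atomic builtins are assumptions, not part of this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical helper: 64-bit compare-and-swap retry loop, the same
	 * pattern a cmpxchg64() caller uses. */
	static void add64(uint64_t *ctr, uint64_t delta)
	{
		uint64_t old = __atomic_load_n(ctr, __ATOMIC_RELAXED);

		/* On failure 'old' is refreshed with the current value; retry. */
		while (!__atomic_compare_exchange_n(ctr, &old, old + delta,
						    1, __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED))
			;
	}

	int main(void)
	{
		uint64_t ctr = 0xffffffffULL;	/* upper word about to carry */

		add64(&ctr, 1);
		/* Truncating old/new to a 32-bit unsigned long, as the pre-patch
		 * macro did, would lose the upper word of values like this one. */
		printf("%llu\n", (unsigned long long)ctr);	/* 4294967296 */
		return 0;
	}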
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb) 0
-#else
-#define tlb_fast_mode(tlb) 1
-#endif
-
#define MMU_GATHER_BUNDLE 8
/*
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
- if (!tlb_fast_mode(tlb)) {
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
- }
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ if (tlb->pages == tlb->local)
+ __tlb_alloc_page(tlb);
}
static inline void
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
- }
-
tlb->pages[tlb->nr++] = page;
VM_BUG_ON(tlb->nr > tlb->max);
return tlb->max - tlb->nr;
#define U8500_UART0_PHYS_BASE (0x80120000)
#define U8500_UART1_PHYS_BASE (0x80121000)
#define U8500_UART2_PHYS_BASE (0x80007000)
-#define U8500_UART0_VIRT_BASE (0xa8120000)
-#define U8500_UART1_VIRT_BASE (0xa8121000)
-#define U8500_UART2_VIRT_BASE (0xa8007000)
+#define U8500_UART0_VIRT_BASE (0xf8120000)
+#define U8500_UART1_VIRT_BASE (0xf8121000)
+#define U8500_UART2_VIRT_BASE (0xf8007000)
#define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE
#define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE
#endif
.vm_start = 0xffff0000,
.vm_end = 0xffff0000 + PAGE_SIZE,
.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
- .vm_mm = &init_mm,
};
static int __init gate_vma_init(void)
* this returns, power and/or clocks can be removed at any point
* from this CPU and its cache by platform_cpu_kill().
*/
- RCU_NONIDLE(complete(&cpu_died));
+ complete(&cpu_died);
/*
* Ensure that the cache lines associated with that completion are
wait_event_interruptible(*wq, !vcpu->arch.pause);
}
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.target >= 0;
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
int ret;
sigset_t sigsaved;
- /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
- if (unlikely(vcpu->arch.target < 0))
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
ret = kvm_vcpu_first_run_init(vcpu);
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
+
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ return -ENOEXEC;
+
if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
if (ioctl == KVM_SET_ONE_REG)
struct kvm_reg_list reg_list;
unsigned n;
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ return -ENOEXEC;
+
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+ /*
+ * This function also gets called when dealing with HYP page
+ * tables. As HYP doesn't have an associated struct kvm (and
+ * the HYP page tables are fairly static), we don't do
+ * anything there.
+ */
+ if (kvm)
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
return p;
}
-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
pud_clear(pud);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
pmd_free(NULL, pmd_table);
put_page(virt_to_page(pud));
}
-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
pte_t *pte_table = pte_offset_kernel(pmd, 0);
pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
pte_free_kernel(NULL, pte_table);
put_page(virt_to_page(pmd));
}
return page_count(pmd_page) == 1;
}
-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
kvm_set_pte(pte, __pte(0));
put_page(virt_to_page(pte));
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
}
}
return page_count(pte_page) == 1;
}
-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+ unsigned long long start, u64 size)
{
pgd_t *pgd;
pud_t *pud;
}
pte = pte_offset_kernel(pmd, addr);
- clear_pte_entry(pte);
+ clear_pte_entry(kvm, pte, addr);
range = PAGE_SIZE;
/* If we emptied the pte, walk back up the ladder */
if (pte_empty(pte)) {
- clear_pmd_entry(pmd);
+ clear_pmd_entry(kvm, pmd, addr);
range = PMD_SIZE;
if (pmd_empty(pmd)) {
- clear_pud_entry(pud);
+ clear_pud_entry(kvm, pud, addr);
range = PUD_SIZE;
}
}
mutex_lock(&kvm_hyp_pgd_mutex);
if (boot_hyp_pgd) {
- unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+ unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+ unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
kfree(boot_hyp_pgd);
boot_hyp_pgd = NULL;
}
if (hyp_pgd)
- unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+ unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
kfree(init_bounce_page);
init_bounce_page = NULL;
if (hyp_pgd) {
for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+ unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+ unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
kfree(hyp_pgd);
hyp_pgd = NULL;
}
*/
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
- unmap_range(kvm->arch.pgd, start, size);
+ unmap_range(kvm, kvm->arch.pgd, start, size);
}
/**
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
- kvm_tlb_flush_vmid_ipa(kvm, gpa);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
static struct clock_event_device clkevt = {
.name = "at91_tick",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
.rating = 150,
.set_next_event = clkevt32k_next_event,
.set_mode = clkevt32k_mode,
at91_st_write(AT91_ST_RTMR, 1);
/* Setup timer clockevent, with minimum of two ticks (important!!) */
+ clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+ clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+ clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
clkevt.cpumask = cpumask_of(0);
- clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
- 2, AT91_ST_ALMV);
+ clockevents_register_device(&clkevt);
/* register clocksource */
clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
}
-void __init at91sam9n12_initialize(void)
-{
- at91_extern_irq = (1 << AT91SAM9N12_ID_IRQ0);
-}
-
AT91_SOC_START(at91sam9n12)
.map_io = at91sam9n12_map_io,
.register_clocks = at91sam9n12_register_clocks,
- .init = at91sam9n12_initialize,
AT91_SOC_END
#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */
#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */
-#define AT91_PMC_PCR_DIV2 0x2 /* Peripheral clock is MCK/2 */
-#define AT91_PMC_PCR_DIV4 0x4 /* Peripheral clock is MCK/4 */
-#define AT91_PMC_PCR_DIV8 0x8 /* Peripheral clock is MCK/8 */
+#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */
+#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */
+#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */
#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
#endif
config MACH_UNIVERSAL_C210
bool "Mobile UNIVERSAL_C210 Board"
select CLKSRC_MMIO
+ select CLKSRC_SAMSUNG_PWM
select CPU_EXYNOS4210
select EXYNOS4_SETUP_FIMC
select EXYNOS4_SETUP_FIMD0
select S5P_DEV_TV
select S5P_GPIO_INT
select S5P_SETUP_MIPIPHY
- select SAMSUNG_HRT
help
Machine support for Samsung Mobile Universal S5PC210 Reference
Board.
depends on ARCH_EXYNOS4
select ARM_AMBA
select CLKSRC_OF
+ select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
select CPU_EXYNOS4210
select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
select PINCTRL
*/
#include <linux/kernel.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
+#include <clocksource/samsung_pwm.h>
#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/of.h>
},
};
+static struct samsung_pwm_variant exynos4_pwm_variant = {
+ .bits = 32,
+ .div_base = 0,
+ .has_tint_cstat = true,
+ .tclk_mask = 0,
+};
+
void exynos4_restart(char mode, const char *cmd)
{
__raw_writel(0x1, S5P_SWRESET);
val = 0x1;
addr = EXYNOS_SWRESET;
} else if (of_machine_is_compatible("samsung,exynos5440")) {
+ u32 status;
np = of_find_compatible_node(NULL, NULL, "samsung,exynos5440-clock");
+
+ addr = of_iomap(np, 0) + 0xbc;
+ status = __raw_readl(addr);
+
addr = of_iomap(np, 0) + 0xcc;
- val = (0xfff << 20) | (0x1 << 16);
+ val = __raw_readl(addr);
+
+ val = (val & 0xffff0000) | (status & 0xffff);
} else {
pr_err("%s: cannot support non-DT\n", __func__);
return;
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
+ debug_ll_io_init();
+
#ifdef CONFIG_OF
if (initial_boot_params)
of_scan_flat_dt(exynos_fdt_map_chipid, NULL);
iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
}
+void __init exynos_set_timer_source(u8 channels)
+{
+ exynos4_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+ exynos4_pwm_variant.output_mask &= ~channels;
+}
+
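
For reference, the mask arithmetic in exynos_set_timer_source() above works out as follows, assuming SAMSUNG_PWM_NUM is 5 (the usual channel count; an assumption here, not stated in this hunk). A small standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define SAMSUNG_PWM_NUM	5	/* assumed: five PWM timer channels */

	int main(void)
	{
		/* All channels start out available as PWM outputs: 0x1f. */
		uint32_t output_mask = BIT(SAMSUNG_PWM_NUM) - 1;

		/* Mirrors exynos_set_timer_source(BIT(2) | BIT(4)) in the
		 * Universal C210 board code below: channels 2 and 4 are claimed
		 * for the clocksource/clockevent and removed from the mask. */
		output_mask &= ~(BIT(2) | BIT(4));

		printf("0x%02x\n", output_mask);	/* prints 0x0b */
		return 0;
	}
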
void __init exynos_init_time(void)
{
+ unsigned int timer_irqs[SAMSUNG_PWM_NUM] = {
+ EXYNOS4_IRQ_TIMER0_VIC, EXYNOS4_IRQ_TIMER1_VIC,
+ EXYNOS4_IRQ_TIMER2_VIC, EXYNOS4_IRQ_TIMER3_VIC,
+ EXYNOS4_IRQ_TIMER4_VIC,
+ };
+
if (of_have_populated_dt()) {
#ifdef CONFIG_OF
of_clk_init(NULL);
exynos4_clk_init(NULL, !soc_is_exynos4210(), S5P_VA_CMU, readl(S5P_VA_CHIPID + 8) & 1);
exynos4_clk_register_fixed_ext(xxti_f, xusbxti_f);
#endif
- mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0, EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
+#ifdef CONFIG_CLKSRC_SAMSUNG_PWM
+ if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
+ samsung_pwm_clocksource_init(S3C_VA_TIMER,
+ timer_irqs, &exynos4_pwm_variant);
+ else
+#endif
+ mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0,
+ EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
}
}
void exynos_firmware_init(void);
+void exynos_set_timer_source(u8 channels);
+
#ifdef CONFIG_PM_GENERIC_DOMAINS
int exynos_pm_late_initcall(void);
#else
#ifndef __ASM_ARCH_PM_CORE_H
#define __ASM_ARCH_PM_CORE_H __FILE__
+#include <linux/of.h>
#include <mach/regs-pmu.h>
+#ifdef CONFIG_PINCTRL_EXYNOS
+extern u32 exynos_get_eint_wake_mask(void);
+#else
+static inline u32 exynos_get_eint_wake_mask(void) { return 0xffffffff; }
+#endif
+
static inline void s3c_pm_debug_init_uart(void)
{
/* nothing here yet */
static inline void s3c_pm_arch_prepare_irqs(void)
{
- __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+ u32 eintmask = s3c_irqwake_eintmask;
+
+ if (of_have_populated_dt())
+ eintmask = exynos_get_eint_wake_mask();
+
+ __raw_writel(eintmask, S5P_EINT_WAKEUP_MASK);
__raw_writel(s3c_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK);
}
#include <plat/mfc.h>
#include <plat/sdhci.h>
#include <plat/fimc-core.h>
-#include <plat/samsung-time.h>
#include <plat/camport.h>
#include <mach/map.h>
{
exynos_init_io(NULL, 0);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
- samsung_set_timer_source(SAMSUNG_PWM2, SAMSUNG_PWM4);
+ exynos_set_timer_source(BIT(2) | BIT(4));
xxti_f = 0;
xusbxti_f = 24000000;
}
.map_io = universal_map_io,
.init_machine = universal_machine_init,
.init_late = exynos_init_late,
- .init_time = samsung_timer_init,
+ .init_time = exynos_init_time,
.reserve = &universal_reserve,
.restart = exynos4_restart,
MACHINE_END
static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
-static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
-static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "axi", "ahb", };
static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
-static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
- clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
- clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels));
clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
- clk[mlb] = imx_clk_gate2("mlb", "pll8_mlb", base + 0x74, 18);
+ clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
.section ".text.head", "ax"
#ifdef CONFIG_SMP
+diag_reg_offset:
+ .word g_diag_reg - .
+
+ .macro set_diag_reg
+ adr r0, diag_reg_offset
+ ldr r1, [r0]
+ add r1, r1, r0 @ r1 = physical &g_diag_reg
+ ldr r0, [r1]
+ mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register
+ .endm
+
ENTRY(v7_secondary_startup)
bl v7_invalidate_l1
+ set_diag_reg
b secondary_startup
ENDPROC(v7_secondary_startup)
#endif
#include <linux/init.h>
#include <linux/smp.h>
+#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>
#define SCU_STANDBY_ENABLE (1 << 5)
+u32 g_diag_reg;
static void __iomem *scu_base;
static struct map_desc scu_io_desc __initdata = {
static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
{
imx_smp_prepare();
+
+ /*
+ * The diagnostic register holds the errata bits. Mostly bootloader
+ * does not bring up secondary cores, so that when errata bits are set
+ * in bootloader, they are set only for boot cpu. But on a SMP
+ * configuration, it should be equally done on every single core.
+ * Read the register from boot cpu here, and will replicate it into
+ * secondary cores when booting them.
+ */
+ asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
+ __cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
+ outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
}
struct smp_operations imx_smp_ops __initdata = {
pm_power_off = qnap_tsx1x_power_off;
}
-
-/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
-static int __init ts219_pci_init(void)
-{
- if (machine_is_ts219())
- kirkwood_pcie_init(KW_PCIE0);
-
- return 0;
-}
-subsys_initcall(ts219_pci_init);
{
orion_time_set_base(TIMER_VIRT_BASE);
- /*
- * Some Kirkwood devices allocate their coherent buffers from atomic
- * context. Increase size of atomic coherent pool to make sure such
- * the allocations won't fail.
- */
- init_dma_coherent_pool_size(SZ_1M);
mvebu_mbus_init("marvell,kirkwood-mbus",
BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
static int __init ts219_pci_init(void)
{
if (machine_is_ts219())
- kirkwood_pcie_init(KW_PCIE0);
+ kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
return 0;
}
select MVEBU_CLK_GATING
select MVEBU_MBUS
select ZONE_DMA if ARM_LPAE
+ select ARCH_REQUIRE_GPIOLIB
if ARCH_MVEBU
{
char *mbus_soc_name;
- /*
- * Some Armada 370/XP devices allocate their coherent buffers
- * from atomic context. Increase size of atomic coherent pool
- * to make sure such the allocations won't fail.
- */
- init_dma_coherent_pool_size(SZ_1M);
-
/*
* This initialization will be replaced by a DT-based
* initialization once the mvebu-mbus driver gains DT support.
/* Add CPU to SMP group - Atomic */
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
- ldr r2, [r3]
+1:
+ ldrex r2, [r3]
orr r2, r2, r1
- str r2, [r3]
+ strex r0, r2, [r3]
+ cmp r0, #0
+ bne 1b
/* Enable coherency on CPU - Atomic */
- add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
- ldr r2, [r3]
+ add r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
+1:
+ ldrex r2, [r3]
orr r2, r2, r1
- str r2, [r3]
+ strex r0, r2, [r3]
+ cmp r0, #0
+ bne 1b
dsb
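
The ldrex/strex pairs turn what used to be a plain load/modify/store into an atomic read-modify-write, retried whenever another CPU updates the register in between. In C terms this is roughly an atomic fetch-or; a minimal userspace sketch (illustrative only, using GCC builtins on ordinary memory rather than the coherency-fabric registers themselves):

	#include <stdint.h>
	#include <stdio.h>

	/* Roughly what the ldrex/orr/strex/bne sequence achieves: set bits in
	 * a word shared between CPUs without losing concurrent updates. */
	static void set_bits_atomically(uint32_t *word, uint32_t mask)
	{
		__atomic_fetch_or(word, mask, __ATOMIC_RELAXED);
	}

	int main(void)
	{
		uint32_t reg = 0x1;

		set_bits_atomically(&reg, 0x4);
		printf("0x%x\n", reg);	/* prints 0x5 */
		return 0;
	}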
dev_err(&pdev->dev,
"%s: Memory allocation failed for d->chan!\n",
__func__);
+ ret = -ENOMEM;
goto exit_release_d;
}
*/
DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
-DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
- AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
- 0x0, NULL);
+static struct clk clkdiv32k_ick;
+
+static const char *clkdiv32k_ick_parent_names[] = {
+ "clkdiv32k_ck",
+};
+
+static const struct clk_ops clkdiv32k_ick_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap clkdiv32k_ick_hw = {
+ .hw = {
+ .clk = &clkdiv32k_ick,
+ },
+ .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ .clkdm_name = "clk_24mhz_clkdm",
+};
+
+DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
/* "usbotg_fck" is an additional clock and not really a modulemode */
DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
clkdm = _get_clkdm(oh);
if (sf & SYSC_HAS_SIDLEMODE) {
+ if (oh->flags & HWMOD_SWSUP_SIDLE ||
+ oh->flags & HWMOD_SWSUP_SIDLE_ACT) {
+ idlemode = HWMOD_IDLEMODE_NO;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
+
+ /*
+ * Special handling for some IPs, such as the 32k sync timer:
+ * force them to idle.
+ */
clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
if (clkdm_act && !(oh->class->sysc->idlemodes &
(SIDLE_SMART | SIDLE_SMART_WKUP)))
idlemode = HWMOD_IDLEMODE_FORCE;
- else
- idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
- HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+
_set_slave_idlemode(oh, idlemode, &v);
}
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);
- /* If slave is in SMARTIDLE, also enable wakeup */
- if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
- _enable_wakeup(oh, &v);
-
_write_sysconfig(v, oh);
/*
sf = oh->class->sysc->sysc_flags;
if (sf & SYSC_HAS_SIDLEMODE) {
- /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
- if (oh->flags & HWMOD_SWSUP_SIDLE ||
- !(oh->class->sysc->idlemodes &
- (SIDLE_SMART | SIDLE_SMART_WKUP)))
+ if (oh->flags & HWMOD_SWSUP_SIDLE) {
idlemode = HWMOD_IDLEMODE_FORCE;
- else
- idlemode = HWMOD_IDLEMODE_SMART;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
_set_slave_idlemode(oh, idlemode, &v);
}
_set_master_standbymode(oh, idlemode, &v);
}
- /* If slave is in SMARTIDLE, also enable wakeup */
- if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
- _enable_wakeup(oh, &v);
-
_write_sysconfig(v, oh);
}
* do so is present in the hwmod data, then call it and pass along the
* return value; otherwise, return 0.
*/
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
{
if (!oh->class->enable_preprogram)
return 0;
return 0;
}
-/**
- * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit
- * @oh: struct omap_hwmod *
- * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
- *
- * Sets the IP block's OCP autoidle bit in hardware, and updates our
- * local copy. Intended to be used by drivers that require
- * direct manipulation of the AUTOIDLE bits.
- * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes
- * along the return value from _set_module_autoidle().
- *
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle)
-{
- u32 v;
- int retval = 0;
- unsigned long flags;
-
- if (!oh || oh->_state != _HWMOD_STATE_ENABLED)
- return -EINVAL;
-
- spin_lock_irqsave(&oh->_lock, flags);
-
- v = oh->_sysc_cache;
-
- retval = _set_module_autoidle(oh, autoidle, &v);
-
- if (!retval)
- _write_sysconfig(v, oh);
-
- spin_unlock_irqrestore(&oh->_lock, flags);
-
- return retval;
-}
-
/**
* _shutdown - shutdown an omap_hwmod
* @oh: struct omap_hwmod *
return ret;
}
-/**
- * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode
- * @oh: struct omap_hwmod *
- * @idlemode: SIDLEMODE field bits (shifted to bit 0)
- *
- * Sets the IP block's OCP slave idlemode in hardware, and updates our
- * local copy. Intended to be used by drivers that have some erratum
- * that requires direct manipulation of the SIDLEMODE bits. Returns
- * -EINVAL if @oh is null, or passes along the return value from
- * _set_slave_idlemode().
- *
- * XXX Does this function have any current users? If not, we should
- * remove it; it is better to let the rest of the hwmod code handle this.
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode)
-{
- u32 v;
- int retval = 0;
-
- if (!oh)
- return -EINVAL;
-
- v = oh->_sysc_cache;
-
- retval = _set_slave_idlemode(oh, idlemode, &v);
- if (!retval)
- _write_sysconfig(v, oh);
-
- return retval;
-}
-
/**
* omap_hwmod_lookup - look up a registered omap_hwmod by name
* @name: name of the omap_hwmod to look up
* is kept in force-standby mode. Failing to do so causes PM problems
* with musb on OMAP3630 at least. Note that musb has a dedicated register
* to control MSTANDBY signal when MIDLEMODE is set to force-standby.
+ * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module
+ * out of idle, but rely on smart-idle to put it back in idle, so that
+ * wakeups are still functional.  (The only known case for now is the UART.)
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
#define HWMOD_BLOCK_WFI (1 << 10)
#define HWMOD_FORCE_MSTANDBY (1 << 11)
+#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
/*
* omap_hwmod._int_flags definitions
int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle);
-
int omap_hwmod_reset(struct omap_hwmod *oh);
void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.name = "uart1",
.class = &uart_class,
.clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart1_irqs,
.sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_wkupdm_ck",
.name = "uart2",
.class = &uart_class,
.clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart2_irqs,
.sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.name = "uart3",
.class = &uart_class,
.clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart3_irqs,
.sdma_reqs = uart3_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.name = "uart4",
.class = &uart_class,
.clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart4_irqs,
.sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.name = "uart5",
.class = &uart_class,
.clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart5_irqs,
.sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.name = "uart6",
.class = &uart_class,
.clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart6_irqs,
.sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
.name = "uart1",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = omap44xx_uart1_irqs,
.sdma_reqs = omap44xx_uart1_sdma_reqs,
.main_clk = "func_48m_fclk",
.name = "uart2",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = omap44xx_uart2_irqs,
.sdma_reqs = omap44xx_uart2_sdma_reqs,
.main_clk = "func_48m_fclk",
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
+ HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = omap44xx_uart3_irqs,
.sdma_reqs = omap44xx_uart3_sdma_reqs,
.main_clk = "func_48m_fclk",
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = omap44xx_uart4_irqs,
.sdma_reqs = omap44xx_uart4_sdma_reqs,
.main_clk = "func_48m_fclk",
omap_hwmod_disable_wakeup(od->hwmods[0]);
}
-/*
- * Errata i291: [UART]:Cannot Acknowledge Idle Requests
- * in Smartidle Mode When Configured for DMA Operations.
- * WA: configure uart in force idle mode.
- */
-static void omap_uart_set_noidle(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *od = to_omap_device(pdev);
-
- omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
-}
-
-static void omap_uart_set_smartidle(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *od = to_omap_device(pdev);
- u8 idlemode;
-
- if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
- idlemode = HWMOD_IDLEMODE_SMART_WKUP;
- else
- idlemode = HWMOD_IDLEMODE_SMART;
-
- omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
-}
-
#else
static void omap_uart_enable_wakeup(struct device *dev, bool enable)
{}
-static void omap_uart_set_noidle(struct device *dev) {}
-static void omap_uart_set_smartidle(struct device *dev) {}
#endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
omap_up.flags = UPF_BOOT_AUTOCONF;
omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
- omap_up.set_forceidle = omap_uart_set_smartidle;
- omap_up.set_noidle = omap_uart_set_noidle;
omap_up.enable_wakeup = omap_uart_enable_wakeup;
omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
omap_up.dma_rx_timeout = info->dma_rx_timeout;
orion_time_set_base(TIMER_VIRT_BASE);
- /*
- * Some Orion5x devices allocate their coherent buffers from atomic
- * context. Increase size of atomic coherent pool to make sure such
- * the allocations won't fail.
- */
- init_dma_coherent_pool_size(SZ_1M);
-
/* Initialize the MBUS driver */
orion5x_pcie_id(&dev, &rev);
if (dev == MV88F5281_DEV_ID)
static struct usb_phy *phy;
static int usb_power_on(struct platform_device *pdev)
{
- if (!phy)
- return -EIO;
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
static void usb_power_off(struct platform_device *pdev)
{
- if (!phy)
+ if (IS_ERR(phy))
return;
usb_phy_shutdown(phy);
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clockevent_rating = 125,
+ .clockevent_rating = 80,
.clocksource_rating = 125,
};
config ARCH_SUNXI
bool "Allwinner A1X SOCs" if ARCH_MULTI_V7
+ select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CLKSRC_OF
select COMMON_CLK
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing register base\n");
- return -ENOMEM;
- }
-
emc_regbase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(emc_regbase))
return PTR_ERR(emc_regbase);
bool "U8500 Development platform, MOP500 versions"
select I2C
select I2C_NOMADIK
+ select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select SOC_BUS
select UX500_SOC_DB8500
static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-DISPLAY",
.min_uV = 2800000,
},
/* supplies to the on-board eMMC */
[AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-eMMC1",
.min_uV = 1100000,
},
/* supply for VAUX3, supplies to SDcard slots */
[AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-MMC-SD",
.min_uV = 1100000,
sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
mop500_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
snowball_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
snowball_platform_devs[i]->dev.parent = parent;
sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
hrefv60_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
/*
* This function is called from the board init
*/
-struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
+struct device * __init u8500_init_devices(void)
{
struct device *parent;
int i;
for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
platform_devs[i]->dev.parent = parent;
- db8500_prcmu_device.dev.platform_data = ab8500;
-
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
return parent;
OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
&db8500_prcmu_pdata),
- OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL),
+ OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL),
/* Requires device name bindings. */
OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
"pinctrl-db8500", NULL),
#include <asm/proc-fns.h>
#include "db8500-regs.h"
+#include "id.h"
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
int __init ux500_idle_init(void)
{
+ if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
+ return -ENODEV;
+
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
PRCMU_WAKEUP(ABB));
void __init ux500_map_io(void);
extern void __init u8500_map_io(void);
-extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500);
+extern struct device * __init u8500_init_devices(void);
extern void __init ux500_init_irq(void);
extern void __init ux500_init_late(void);
"wm,wm8505",
"wm,wm8750",
"wm,wm8850",
+ NULL
};
DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
static struct platform_device orion_ge10_shared = {
.name = MV643XX_ETH_SHARED_NAME,
- .id = 1,
+ .id = 2,
.dev = {
.platform_data = &orion_ge10_shared_data,
},
static struct platform_device orion_ge10 = {
.name = MV643XX_ETH_NAME,
- .id = 1,
- .num_resources = 2,
+ .id = 2,
+ .num_resources = 1,
.resource = orion_ge10_resources,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
static struct platform_device orion_ge11_shared = {
.name = MV643XX_ETH_SHARED_NAME,
- .id = 1,
+ .id = 3,
.dev = {
.platform_data = &orion_ge11_shared_data,
},
static struct platform_device orion_ge11 = {
.name = MV643XX_ETH_NAME,
- .id = 1,
- .num_resources = 2,
+ .id = 3,
+ .num_resources = 1,
.resource = orion_ge11_resources,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
#ifndef __PLAT_COMMON_H
#include <linux/mv643xx_eth.h>
+#include <linux/platform_data/usb-ehci-orion.h>
struct dsa_platform_data;
struct mv_sata_platform_data;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(dev, "failed to find registers\n");
- return -ENXIO;
- }
-
adc->regs = devm_ioremap_resource(dev, regs);
if (IS_ERR(adc->regs))
return PTR_ERR(adc->regs);
#ifdef CONFIG_S5P_DEV_FIMD0
static struct resource s5p_fimd0_resource[] = {
[0] = DEFINE_RES_MEM(S5P_PA_FIMD0, SZ_32K),
- [1] = DEFINE_RES_IRQ(IRQ_FIMD0_VSYNC),
- [2] = DEFINE_RES_IRQ(IRQ_FIMD0_FIFO),
- [3] = DEFINE_RES_IRQ(IRQ_FIMD0_SYSTEM),
+ [1] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_VSYNC, "vsync"),
+ [2] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_FIFO, "fifo"),
+ [3] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_SYSTEM, "lcd_sys"),
};
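
Naming the FIMD interrupt resources lets a driver request them by name instead of by position. A hedged sketch of how a probe routine might pick up the "vsync" line (hypothetical function and handler names; platform_get_irq_byname() and devm_request_irq() are the existing kernel APIs):

	#include <linux/platform_device.h>
	#include <linux/interrupt.h>

	static irqreturn_t example_vsync_irq(int irq, void *dev_id)
	{
		/* Handle the VSYNC interrupt here. */
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		/* Look the interrupt up by the resource name defined above. */
		int irq = platform_get_irq_byname(pdev, "vsync");

		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, example_vsync_irq, 0,
					dev_name(&pdev->dev), pdev);
	}
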
struct platform_device s5p_device_fimd0 = {
static void putc(int ch)
{
+ if (!config_enabled(CONFIG_DEBUG_LL))
+ return;
+
if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) {
int level;
#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
static inline void arch_enable_uart_fifo(void)
{
- u32 fifocon = uart_rd(S3C2410_UFCON);
+ u32 fifocon;
+
+ if (!config_enabled(CONFIG_DEBUG_LL))
+ return;
+
+ fifocon = uart_rd(S3C2410_UFCON);
if (!(fifocon & S3C2410_UFCON_FIFOMODE)) {
fifocon |= S3C2410_UFCON_RESETBOTH;
str r11, [r10, #TI_PREEMPT]
#endif
ldr r0, VFP_arch_address
- str r5, [r0] @ known non-zero value
+ str r0, [r0] @ set to non-zero value
mov pc, r9 @ we have handled the fault
ENDPROC(vfp_testing_entry)
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
{
struct vcpu_register_vcpu_info info;
struct vcpu_info *vcpup;
int err;
+ int cpu = get_cpu();
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
info.offset = offset_in_page(vcpup);
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
- if (err) {
- pr_debug("register_vcpu_info failed: err=%d\n", err);
- } else {
- /* This cpu is using the registered vcpu info, even if
- later ones fail to. */
- per_cpu(xen_vcpu, cpu) = vcpup;
- }
- return 0;
+ BUG_ON(err);
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+ enable_percpu_irq(xen_events_irq, 0);
}
static void xen_restart(char str, const char *cmd)
const char *version = NULL;
const char *xen_prefix = "xen,xen-";
struct resource res;
- int i;
node = of_find_compatible_node(NULL, NULL, "xen,xen");
if (!node) {
sizeof(struct vcpu_info));
if (xen_vcpu_info == NULL)
return -ENOMEM;
- for_each_online_cpu(i)
- xen_secondary_init(i);
gnttab_init();
if (!xen_initial_domain())
xenbus_probe(NULL);
+ return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
pm_power_off = xen_power_off;
arm_pm_restart = xen_restart;
return 0;
}
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
return IRQ_HANDLED;
}
-static __init void xen_percpu_enable_events(void *unused)
-{
- enable_percpu_irq(xen_events_irq, 0);
-}
-
static int __init xen_init_events(void)
{
if (!xen_domain() || xen_events_irq < 0)
return -EINVAL;
}
- on_each_cpu(xen_percpu_enable_events, NULL, 0);
+ on_each_cpu(xen_percpu_init, NULL, 0);
return 0;
}
menu "Kernel Features"
-source "kernel/time/Kconfig"
-
config ARM64_64K_PAGES
bool "Enable 64KB pages support"
help
.macro enable_dbg_if_not_stepping, tmp
mrs \tmp, mdscr_el1
- tbnz \tmp, #1, 9990f
+ tbnz \tmp, #0, 9990f
enable_dbg
9990:
.endm
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
*/
static void clear_os_lock(void *unused)
{
- asm volatile("msr mdscr_el1, %0" : : "r" (0));
- isb();
asm volatile("msr oslar_el1, %0" : : "r" (0));
isb();
}
}
}
-static struct console early_console = {
+static struct console early_console_dev = {
.name = "earlycon",
.write = early_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
early_base = early_io_map(paddr, EARLYCON_IOBASE);
printch = match->printch;
- register_console(&early_console);
+ early_console = &early_console_dev;
+ register_console(&early_console_dev);
return 0;
}
b.eq el0_fpsimd_exc
cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
+ cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap
+ b.eq el0_undef
+ cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap
+ b.eq el0_undef
+ cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap
+ b.eq el0_undef
+ cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap
+ b.eq el0_undef
+ cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap
+ b.eq el0_undef
cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
b.ge el0_dbg
b el0_inv
#endif
}
-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
{
of_clk_init(NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);
}
subsys_initcall(topology_init);
-static int __init arm64_device_probe(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-device_initcall(arm64_device_probe);
-
static const char *hwcap_str[] = {
"fp",
"asimd",
return;
#endif
- if (show_unhandled_signals) {
+ if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
+ printk_ratelimit()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
dump_instr(KERN_INFO, regs);
}
#endif
- if (show_unhandled_signals) {
+ if (show_unhandled_signals && printk_ratelimit()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), (int)regs->syscallno);
dump_instr("", regs);
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
handler[reason], esr);
+ __show_regs(regs);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = pc;
- die("Oops - bad mode", regs, 0);
- local_irq_disable();
- panic("bad mode");
+ arm64_notify_die("Oops - bad mode", regs, &info, 0);
}
void __pte_error(const char *file, int line, unsigned long val)
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz x5, x4 // find bit position of way size increment
+ clz w5, w4 // find bit position of way size increment
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract max number of the index size
loop2:
{
struct siginfo si;
- if (show_unhandled_signals) {
+ if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
+ printk_ratelimit()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
addr, esr);
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- mov x0, #1
- msr oslar_el1, x0 // Set the debug OS lock
+ msr mdscr_el1, xzr // Reset mdscr_el1
tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
config ARCH_SPARSEMEM_ENABLE
def_bool n
+config NODES_SHIFT
+ int
+ default "2"
+ depends on NEED_MULTIPLE_NODES
+
source "mm/Kconfig"
config OWNERSHIP_TRACE
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += param.h
+++ /dev/null
-#ifndef __ASM_AVR32_NUMNODES_H
-#define __ASM_AVR32_NUMNODES_H
-
-/* Max 4 nodes */
-#define NODES_SHIFT 2
-
-#endif /* __ASM_AVR32_NUMNODES_H */
+++ /dev/null
-#ifndef __ASM_AVR32_PARAM_H
-#define __ASM_AVR32_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ CONFIG_HZ
-# define USER_HZ 100 /* User interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
-#endif /* __ASM_AVR32_PARAM_H */
header-y += termios.h
header-y += types.h
header-y += unistd.h
+generic-y += param.h
+++ /dev/null
-#ifndef _UAPI__ASM_AVR32_PARAM_H
-#define _UAPI__ASM_AVR32_PARAM_H
-
-
-#ifndef HZ
-# define HZ 100
-#endif
-
-/* TODO: Should be configurable */
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-# define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64
-
-#endif /* _UAPI__ASM_AVR32_PARAM_H */
break;
case R_AVR32_GOT18SW:
if ((relocation & 0xfffe0003) != 0
- && (relocation & 0xfffc0003) != 0xffff0000)
+ && (relocation & 0xfffc0000) != 0xfffc0000)
return reloc_overflow(module, "R_AVR32_GOT18SW",
relocation);
relocation >>= 2;
#include <asm/tlbflush.h>
#include <asm/machvec.h>
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb) (1)
-#endif
-
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
struct mmu_gather {
struct mm_struct *mm;
- unsigned int nr; /* == ~0U => fast mode */
+ unsigned int nr;
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
+ unsigned long i;
unsigned int nr;
if (!tlb->need_flush)
/* lastly, release the freed pages */
nr = tlb->nr;
- if (!tlb_fast_mode(tlb)) {
- unsigned long i;
- tlb->nr = 0;
- tlb->start_addr = ~0UL;
- for (i = 0; i < nr; ++i)
- free_page_and_swap_cache(tlb->pages[i]);
- }
+
+ tlb->nr = 0;
+ tlb->start_addr = ~0UL;
+ for (i = 0; i < nr; ++i)
+ free_page_and_swap_cache(tlb->pages[i]);
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
- /*
- * Use fast mode if only 1 CPU is online.
- *
- * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
- * doesn't work because of speculative accesses and software prefetching: the page
- * table of "mm" may (and usually is) the currently active page table and even
- * though the kernel won't do any user-space accesses during the TLB shoot down, a
- * compiler might use speculation or lfetch.fault on what happens to be a valid
- * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
- * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
- * problems. (We could make fast-mode work by switching the current task to a
- * different "mm" during the shootdown.) --davidm 08/02/2002
- */
- tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+ tlb->nr = 0;
tlb->fullmm = full_mm_flush;
tlb->start_addr = ~0UL;
}
{
tlb->need_flush = 1;
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
- }
-
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-amiga"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_AMIGA=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
+CONFIG_AMIGA=y
CONFIG_ZORRO=y
CONFIG_AMIGA_PCMCIA=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_AMIGA=m
CONFIG_AMIGA_Z2RAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_A3000_SCSI=y
CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_3COM is not set
CONFIG_A2065=y
+CONFIG_ARIADNE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
CONFIG_APNE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_AMIGA=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
CONFIG_INPUT_M68K_BEEP=m
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_PRINTER=m
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_DMASOUND_PAULA=m
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-apollo"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_APOLLO=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_APOLLO=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-atari"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_ATARI=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_ATARI=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_ATARI=m
CONFIG_ATARI_FLOPPY=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
CONFIG_ATARILANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_ATARI=y
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_ATARI=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_PRINTER=m
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_ATARI=y
CONFIG_DMASOUND_ATARI=m
CONFIG_HID=m
CONFIG_HIDRAW=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_UHID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
CONFIG_ATARI_DSP56K=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-bvme6000"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_BVME6000=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_BVME6000=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_BVME6000_SCSI=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_BVME6000_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-hp300"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_HP300=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_HP300=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_HPLANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_HP_SDC_RTC=m
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_SERPORT=m
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-mac"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_MAC=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_MAC=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
CONFIG_ADB_IOP=y
CONFIG_ADB_PMU68K=y
CONFIG_ADB_CUDA=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=m
-CONFIG_MACSONIC=m
CONFIG_MACMACE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PMACZILOG=y
CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_VALKYRIE=y
CONFIG_LOGO=y
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
+CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-multi"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68020=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_M68KFPU_EMU=y
CONFIG_AMIGA=y
CONFIG_ATARI=y
CONFIG_MAC=y
CONFIG_HP300=y
CONFIG_SUN3X=y
CONFIG_Q40=y
-CONFIG_M68020=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
CONFIG_ZORRO=y
CONFIG_AMIGA_PCMCIA=y
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_AMIGA=m
CONFIG_PARPORT_1284=y
CONFIG_AMIGA_FLOPPY=y
CONFIG_ATARI_FLOPPY=y
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
CONFIG_AMIGA_Z2RAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_A3000_SCSI=y
CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
CONFIG_ADB_IOP=y
CONFIG_ADB_PMU68K=y
CONFIG_ADB_CUDA=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
-CONFIG_ARIADNE=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
CONFIG_A2065=y
-CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
-CONFIG_APNE=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=y
-CONFIG_MACSONIC=y
-CONFIG_MACMACE=y
-CONFIG_MVME147_NET=y
-CONFIG_MVME16x_NET=y
-CONFIG_BVME6000_NET=y
+CONFIG_ARIADNE=y
CONFIG_ATARILANCE=y
-CONFIG_SUN3LANCE=y
CONFIG_HPLANCE=y
+CONFIG_MVME147_NET=y
+CONFIG_SUN3LANCE=y
+CONFIG_MACMACE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_BVME6000_NET=y
+CONFIG_MVME16x_NET=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_HYDRA=y
+CONFIG_MAC8390=y
CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_APNE=y
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_AMIGA=y
CONFIG_KEYBOARD_ATARI=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_MOUSE_AMIGA=m
CONFIG_MOUSE_ATARI=m
CONFIG_JOYSTICK_AMIGA=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
-CONFIG_HP_SDC_RTC=y
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_HP_SDC_RTC=m
CONFIG_SERIO_Q40KBD=y
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PMACZILOG=y
CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
CONFIG_PRINTER=m
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_DMASOUND_Q40=m
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
CONFIG_ATARI_DSP56K=m
CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-mvme147"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68030=y
CONFIG_VME=y
CONFIG_MVME147=y
-CONFIG_M68030=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME147_SCSI=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_MVME147_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-mvme16x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_MVME16x=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_MVME16x=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME16x_SCSI=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_MVME16x_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-q40"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_Q40=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_M68040=y
CONFIG_M68060=y
+CONFIG_Q40=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_M68K_BEEP=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_Q40KBD=m
+CONFIG_SERIO_Q40KBD=y
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_DMASOUND_Q40=m
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-sun3"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_SUN3=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3_SCSI=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_SUN3LANCE=y
+# CONFIG_NET_CADENCE is not set
CONFIG_SUN3_82586=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-sun3x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
CONFIG_SUN3X=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
CONFIG_NET=y
CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3X_ESP=y
CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
CONFIG_SUN3LANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID=m
CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
CONFIG_CODA_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += futex.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
--- /dev/null
+#ifndef _ASM_M68K_FUTEX_H
+#define _ASM_M68K_FUTEX_H
+
+#ifdef __KERNEL__
+#if !defined(CONFIG_MMU)
+#include <asm-generic/futex.h>
+#else /* CONFIG_MMU */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ u32 val;
+
+ if (unlikely(get_user(val, uaddr) != 0))
+ return -EFAULT;
+
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ return -EFAULT;
+
+ *uval = val;
+
+ return 0;
+}
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval, ret;
+ u32 tmp;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ ret = -EFAULT;
+ if (unlikely(get_user(oldval, uaddr) != 0))
+ goto out_pagefault_enable;
+
+ ret = 0;
+ tmp = oldval;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ tmp = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ tmp += oparg;
+ break;
+ case FUTEX_OP_OR:
+ tmp |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ tmp &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ tmp ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ ret = -EFAULT;
+
+out_pagefault_enable:
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (ret == 0) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+#endif /* CONFIG_MMU */
+#endif /* __KERNEL__ */
+#endif /* _ASM_M68K_FUTEX_H */
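
Illustrative sketch (not part of the patch): the encoded_op word unpacked by
futex_atomic_op_inuser() above is normally built on the user side with the
generic FUTEX_OP() helper from <linux/futex.h>; the stand-alone program below
packs an "add 1, wake if the old value was greater than 0" operation and
checks that the same shifts used in the m68k helper recover the fields.

/* Sketch: pack an encoded_op with FUTEX_OP() and decode it the same way
 * futex_atomic_op_inuser() does.  Uses only the generic uapi definitions. */
#include <assert.h>
#include <linux/futex.h>

int main(void)
{
	unsigned int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	assert(((encoded_op >> 28) & 7)  == FUTEX_OP_ADD);	/* op     */
	assert(((encoded_op >> 24) & 15) == FUTEX_OP_CMP_GT);	/* cmp    */
	assert(((int)(encoded_op << 8) >> 20)  == 1);		/* oparg  */
	assert(((int)(encoded_op << 20) >> 20) == 0);		/* cmparg */
	return 0;
}
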
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
+#ifndef CONFIG_GPIOLIB
static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
int err;
return err;
}
-
+#endif /* !CONFIG_GPIOLIB */
#endif
#ifdef CONFIG_MAC
L(scc_initable_mac):
- .byte 9,12 /* Reset */
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
- .byte 9,0 /* no interrupts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,1,13,0 /* 38400 baud */
is_not_mac(L(serial_init_not_mac))
#ifdef SERIAL_DEBUG
+
/* You may define either or both of these. */
#define MAC_USE_SCC_A /* Modem port */
#define MAC_USE_SCC_B /* Printer port */
#define mac_scc_cha_b_data_offset 0x4
#define mac_scc_cha_a_data_offset 0x6
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+ movel %pc@(L(mac_sccbase)),%a0
+ /* Reset SCC device */
+ moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
+ moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+ /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+ /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+ movel #35,%d0
+5:
+ subq #1,%d0
+ jne 5b
+#endif
+
#ifdef MAC_USE_SCC_A
/* Initialize channel A */
- movel %pc@(L(mac_sccbase)),%a0
lea %pc@(L(scc_initable_mac)),%a1
5: moveb %a1@+,%d0
jmi 6f
#ifdef MAC_USE_SCC_B
/* Initialize channel B */
-#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
- movel %pc@(L(mac_sccbase)),%a0
-#endif /* MAC_USE_SCC_A */
lea %pc@(L(scc_initable_mac)),%a1
7: moveb %a1@+,%d0
jmi 8f
jra 7b
8:
#endif /* MAC_USE_SCC_B */
+
#endif /* SERIAL_DEBUG */
jra L(serial_init_done)
#ifdef SERIAL_DEBUG
-#ifdef MAC_USE_SCC_A
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
movel %pc@(L(mac_sccbase)),%a1
+#endif
+
+#ifdef MAC_USE_SCC_A
3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset)
jeq 3b
moveb %d0,%a1@(mac_scc_cha_a_data_offset)
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
-#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
- movel %pc@(L(mac_sccbase)),%a1
-#endif /* MAC_USE_SCC_A */
4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset)
jeq 4b
moveb %d0,%a1@(mac_scc_cha_b_data_offset)
#define flush_cache_range(vma, start, len) do { } while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { \
- u32 addr = virt_to_phys(dst); \
- memcpy((dst), (src), (len)); \
- if (vma->vm_flags & VM_EXEC) { \
- invalidate_icache_range((unsigned) (addr), \
- (unsigned) (addr) + PAGE_SIZE); \
- flush_dcache_range((unsigned) (addr), \
- (unsigned) (addr) + PAGE_SIZE); \
- } \
-} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-do { \
- memcpy((dst), (src), (len)); \
-} while (0)
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, void *src, int len)
+{
+ u32 addr = virt_to_phys(dst);
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ invalidate_icache_range(addr, addr + PAGE_SIZE);
+ flush_dcache_range(addr, addr + PAGE_SIZE);
+ }
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+}
#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
__asm__ __volatile__ ("1: lwx %1, %3, r0; \
cmp %2, %1, %4; \
- beqi %2, 3f; \
+ bnei %2, 3f; \
2: swx %5, %3, r0; \
addic %2, r0, 0; \
bnei %2, 1b; \
* inb_p/inw_p/...
* The macros don't do byte-swapping.
*/
-#define inb(port) readb((u8 *)((port)))
+#define inb(port) readb((u8 *)((unsigned long)(port)))
#define outb(val, port) writeb((val), (u8 *)((unsigned long)(port)))
-#define inw(port) readw((u16 *)((port)))
+#define inw(port) readw((u16 *)((unsigned long)(port)))
#define outw(val, port) writew((val), (u16 *)((unsigned long)(port)))
-#define inl(port) readl((u32 *)((port)))
+#define inl(port) readl((u32 *)((unsigned long)(port)))
#define outl(val, port) writel((val), (u32 *)((unsigned long)(port)))
#define inb_p(port) inb((port))
if ((get_fs().seg < ((unsigned long)addr)) ||
(get_fs().seg < ((unsigned long)addr + size - 1))) {
pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
- type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+ type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 0;
}
ok:
pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
- type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+ type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 1;
}
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int volatile temp; \
+ int volatile temp = 0; \
int align = ~(line_length - 1); \
end = ((end & align) == end) ? end - line_length : end & align; \
WARN_ON(end - start < 0); \
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <asm/bootinfo.h>
+#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <asm/idle.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
+#include <asm/idle.h>
#include <asm/time.h> /* for mips_hpt_frequency */
#include <asm/reboot.h> /* for _machine_{restart,halt} */
#include <asm/mips_machine.h>
*/
static void octeon_kill_core(void *arg)
{
- mb();
- if (octeon_is_simulation()) {
- /* The simulator needs the watchdog to stop for dead cores */
- cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ if (octeon_is_simulation())
/* A break instruction causes the simulator stop a core */
- asm volatile ("sync\nbreak");
- }
+ asm volatile ("break" ::: "memory");
+
+ local_irq_disable();
+ /* Disable watchdog on this core. */
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ /* Spin in a low power mode. */
+ while (true)
+ asm volatile ("wait" ::: "memory");
}
#include <linux/io.h>
#include <linux/leds.h>
+#include <asm/idle.h>
#include <asm/processor.h>
#include <cobalt.h>
CONFIG_USB_HID=y
CONFIG_USB_SUPPORT=y
CONFIG_USB=y
-CONFIG_USB_SUSPEND=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
CONFIG_USB_OTG_WHITELIST=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
#include <linux/seq_file.h>
#include <linux/clk.h>
-extern void (*cpu_wait) (void);
-
struct clk;
struct clk_ops {
--- /dev/null
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern void (*cpu_wait)(void);
+extern void r4k_wait(void);
+extern asmlinkage void __r4k_wait(void);
+extern void r4k_wait_irqoff(void);
+extern void __pastwait(void);
+
+static inline int using_rollback_handler(void)
+{
+ return cpu_wait == r4k_wait;
+}
+
+static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
+{
+ return addr >= (unsigned long)r4k_wait_irqoff &&
+ addr < (unsigned long)__pastwait;
+}
+
+#endif /* __ASM_IDLE_H */
*/
static inline unsigned long virt_to_phys(volatile const void *address)
{
- return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+ return __pa(address);
}
/*
+++ /dev/null
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#ifndef __LINUX_KVM_MIPS_H
-#define __LINUX_KVM_MIPS_H
-
-#include <linux/types.h>
-
-#define __KVM_MIPS
-
-#define N_MIPS_COPROC_REGS 32
-#define N_MIPS_COPROC_SEL 8
-
-/* for KVM_GET_REGS and KVM_SET_REGS */
-struct kvm_regs {
- __u32 gprs[32];
- __u32 hi;
- __u32 lo;
- __u32 pc;
-
- __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
-};
-
-/* for KVM_GET_SREGS and KVM_SET_SREGS */
-struct kvm_sregs {
-};
-
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-struct kvm_mips_interrupt {
- /* in */
- __u32 cpu;
- __u32 irq;
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif /* __LINUX_KVM_MIPS_H */
#define VPN2_MASK 0xffffe000
#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
struct kvm_mips_tlb {
uint32_t cause);
int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
uint32_t cause);
- int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
- struct kvm_regs *regs);
- int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
- struct kvm_regs *regs);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-#define ASID_INC(asid) \
-({ \
- unsigned long __asid = asid; \
- __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \
- ".section\t__asid_inc,\"a\"\n\t" \
- ".word\t1b\n\t" \
- ".previous" \
- :"=r" (__asid) \
- :"0" (__asid)); \
- __asid; \
-})
-#define ASID_MASK(asid) \
-({ \
- unsigned long __asid = asid; \
- __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \
- ".section\t__asid_mask,\"a\"\n\t" \
- ".word\t1b\n\t" \
- ".previous" \
- :"=r" (__asid) \
- :"r" (__asid)); \
- __asid; \
-})
-#define ASID_VERSION_MASK \
-({ \
- unsigned long __asid; \
- __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
- ".section\t__asid_version_mask,\"a\"\n\t" \
- ".word\t1b\n\t" \
- ".previous" \
- :"=r" (__asid)); \
- __asid; \
-})
-#define ASID_FIRST_VERSION \
-({ \
- unsigned long __asid = asid; \
- __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
- ".section\t__asid_first_version,\"a\"\n\t" \
- ".word\t1b\n\t" \
- ".previous" \
- :"=r" (__asid)); \
- __asid; \
-})
-
-#define ASID_FIRST_VERSION_R3000 0x1000
-#define ASID_FIRST_VERSION_R4000 0x100
-#define ASID_FIRST_VERSION_R8000 0x1000
-#define ASID_FIRST_VERSION_RM9000 0x1000
+#define ASID_INC 0x40
+#define ASID_MASK 0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC 0x10
+#define ASID_MASK 0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC 0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK (smtc_asid_mask)
+#define HW_ASID_MASK 0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC 0x1
+#define ASID_MASK 0xff
-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK 0xff
-extern unsigned int smtc_asid_mask;
#endif
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
+/*
+ * All unused by hardware upper bits will be considered
+ * as a software asid extension.
+ */
+#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
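
Illustrative sketch (not part of the patch): feeding the hardware masks above
into the new ASID_VERSION_MASK / ASID_FIRST_VERSION formulas reproduces the
constants that the removed ASID_FIRST_VERSION_R4000 / ASID_FIRST_VERSION_R3000
definitions used to hard-code.

/* Stand-alone check of the ASID version derivation, for two of the
 * hardware ASID masks defined above. */
#include <assert.h>

static unsigned long first_version(unsigned long asid_mask)
{
	unsigned long version_mask = ~(asid_mask | (asid_mask - 1));

	return ~version_mask + 1;
}

int main(void)
{
	assert(first_version(0xff)  == 0x100);	/* classic 8-bit ASID field    */
	assert(first_version(0xfc0) == 0x1000);	/* R3000/TX39-style 0xfc0 mask */
	return 0;
}
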
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
extern void kvm_local_flush_tlb_all(void);
unsigned long asid = asid_cache(cpu);
- if (!ASID_MASK((asid = ASID_INC(asid)))) {
+ if (! ((asid += ASID_INC) & ASID_MASK) ) {
if (cpu_has_vtag_icache)
flush_icache_all();
#ifdef CONFIG_VIRTUALIZATION
* free up the ASID value for use and flush any old
* instances of it from the TLB.
*/
- oldasid = ASID_MASK(read_c0_entryhi());
+ oldasid = (read_c0_entryhi() & ASID_MASK);
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
* having ASID_MASK smaller than the hardware maximum,
* make sure no "soft" bits become "hard"...
*/
- write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+ write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
mtflags = dvpe();
- oldasid = ASID_MASK(read_c0_entryhi());
+ oldasid = read_c0_entryhi() & ASID_MASK;
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
- cpu_asid(cpu, next));
+ write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+ cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
prevvpe = dvpe();
- oldasid = ASID_MASK(read_c0_entryhi());
+ oldasid = (read_c0_entryhi() & ASID_MASK);
if (smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+ write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
| cpu_asid(cpu, mm));
ehb(); /* Make sure it propagates to TCStatus */
evpe(prevvpe);
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#include <linux/pfn.h>
-#include <asm/io.h>
extern void build_clear_page(void);
extern void build_copy_page(void);
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>
/*
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) \
-({ \
- unsigned long __pfn = (pfn); \
- /* avoid <linux/bootmem.h> include hell */ \
- extern unsigned long min_low_pfn; \
- \
- __pfn >= min_low_pfn && __pfn < max_mapnr; \
-})
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+
+ return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+}
#elif defined(CONFIG_SPARSEMEM)
/*
* System setup and hardware flags..
*/
-extern void (*cpu_wait)(void);
extern unsigned int vced_count, vcei_count;
#include <asm/isadep.h>
#include <uapi/asm/ptrace.h>
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ */
+struct pt_regs {
+#ifdef CONFIG_32BIT
+ /* Pad bytes for argument save space on the stack. */
+ unsigned long pad0[6];
+#endif
+
+ /* Saved main processor registers. */
+ unsigned long regs[32];
+
+ /* Saved special registers. */
+ unsigned long cp0_status;
+ unsigned long hi;
+ unsigned long lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ unsigned long acx;
+#endif
+ unsigned long cp0_badvaddr;
+ unsigned long cp0_cause;
+ unsigned long cp0_epc;
+#ifdef CONFIG_MIPS_MT_SMTC
+ unsigned long cp0_tcstatus;
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ unsigned long long mpl[3]; /* MTM{0,1,2} */
+ unsigned long long mtp[3]; /* MTP{0,1,2} */
+#endif
+} __aligned(8);
+
struct task_struct;
extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 gpr[32];
+ __u64 hi;
+ __u64 lo;
+ __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ *
+ * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
+ * are zero filled.
+ */
+struct kvm_fpu {
+ __u64 fpr[32];
+ __u32 fir;
+ __u32 fccr;
+ __u32 fexr;
+ __u32 fenr;
+ __u32 fcsr;
+ __u32 pad;
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * registers. The id field is broken down as follows:
+ *
+ * bits[2..0] - Register 'sel' index.
+ * bits[7..3] - Register 'rd' index.
+ * bits[15..8] - Must be zero.
+ * bits[63..16] - 1 -> CP0 registers.
+ *
+ * Other sets registers may be added in the future. Each set would
+ * have its own identifier in bits[63..16].
+ *
+ * The addr field of struct kvm_one_reg must point to an aligned
+ * 64-bit wide location. For registers that are narrower than
+ * 64-bits, the value is stored in the low order bits of the location,
+ * and sign extended to 64-bits.
+ *
+ * The registers defined in struct kvm_regs are also accessible, the
+ * id values for these are below.
+ */
+
+#define KVM_REG_MIPS_R0 0
+#define KVM_REG_MIPS_R1 1
+#define KVM_REG_MIPS_R2 2
+#define KVM_REG_MIPS_R3 3
+#define KVM_REG_MIPS_R4 4
+#define KVM_REG_MIPS_R5 5
+#define KVM_REG_MIPS_R6 6
+#define KVM_REG_MIPS_R7 7
+#define KVM_REG_MIPS_R8 8
+#define KVM_REG_MIPS_R9 9
+#define KVM_REG_MIPS_R10 10
+#define KVM_REG_MIPS_R11 11
+#define KVM_REG_MIPS_R12 12
+#define KVM_REG_MIPS_R13 13
+#define KVM_REG_MIPS_R14 14
+#define KVM_REG_MIPS_R15 15
+#define KVM_REG_MIPS_R16 16
+#define KVM_REG_MIPS_R17 17
+#define KVM_REG_MIPS_R18 18
+#define KVM_REG_MIPS_R19 19
+#define KVM_REG_MIPS_R20 20
+#define KVM_REG_MIPS_R21 21
+#define KVM_REG_MIPS_R22 22
+#define KVM_REG_MIPS_R23 23
+#define KVM_REG_MIPS_R24 24
+#define KVM_REG_MIPS_R25 25
+#define KVM_REG_MIPS_R26 26
+#define KVM_REG_MIPS_R27 27
+#define KVM_REG_MIPS_R28 28
+#define KVM_REG_MIPS_R29 29
+#define KVM_REG_MIPS_R30 30
+#define KVM_REG_MIPS_R31 31
+
+#define KVM_REG_MIPS_HI 32
+#define KVM_REG_MIPS_LO 33
+#define KVM_REG_MIPS_PC 34
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+ __u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+ /* in */
+ __u32 cpu;
+ __u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
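
Illustrative user-space sketch (not part of the patch): reading the guest CP0
Status register (rd = 12, sel = 0) through KVM_GET_ONE_REG, following the id
layout documented above.  The vcpu_fd parameter is assumed to be an
already-created KVM vCPU file descriptor.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 0 and stores the 64-bit register value in *out, -1 on error. */
int read_guest_cp0_status(int vcpu_fd, uint64_t *out)
{
	struct kvm_one_reg reg;
	uint64_t val;

	/* bits[63..16] = 1 -> CP0 set; bits[7..3] = rd; bits[2..0] = sel */
	reg.id = (1ULL << 16) | (12 << 3) | 0;
	reg.addr = (uintptr_t)&val;	/* must point at an aligned 64-bit slot */

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	*out = val;
	return 0;
}

The id built here, 0x10000 + 8 * 12 + 0, matches the KVM_REG_MIPS_CP0_STATUS
encoding used by the kvm_mips_get_reg()/kvm_mips_set_reg() handlers later in
this series.
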
#define DSP_CONTROL 77
#define ACX 78
+#ifndef __KERNEL__
/*
* This struct defines the way the registers are stored on the stack during a
* system call/exception. As usual the registers k0/k1 aren't being saved.
*/
struct pt_regs {
-#ifdef CONFIG_32BIT
- /* Pad bytes for argument save space on the stack. */
- unsigned long pad0[6];
-#endif
-
/* Saved main processor registers. */
unsigned long regs[32];
unsigned long cp0_status;
unsigned long hi;
unsigned long lo;
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
- unsigned long acx;
-#endif
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- unsigned long long mpl[3]; /* MTM{0,1,2} */
- unsigned long long mtp[3]; /* MTP{0,1,2} */
-#endif
} __attribute__ ((aligned (8)));
+#endif /* __KERNEL__ */
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
#define __NR_process_vm_writev (__NR_Linux + 305)
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
+#define __NR_getdents64 (__NR_Linux + 308)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 307
+#define __NR_Linux_syscalls 308
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 307
+#define __NR_64_Linux_syscalls 308
#if _MIPS_SIM == _MIPS_SIM_NABI32
extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
+obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
prom.o ptrace.o reset.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o watch.o vdso.o
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
#include <asm/spram.h>
#include <asm/uaccess.h>
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void);
-EXPORT_SYMBOL(cpu_wait);
-
-static void r3081_wait(void)
-{
- unsigned long cfg = read_c0_conf();
- write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
- local_irq_disable();
- if (!need_resched())
- write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
- local_irq_enable();
-}
-
-extern void r4k_wait(void);
-
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically. Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-void r4k_wait_irqoff(void)
-{
- local_irq_disable();
- if (!need_resched())
- __asm__(" .set push \n"
- " .set mips3 \n"
- " wait \n"
- " .set pop \n");
- local_irq_enable();
- __asm__(" .globl __pastwait \n"
- "__pastwait: \n");
-}
-
-/*
- * The RM7000 variant has to handle erratum 38. The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
- local_irq_disable();
- if (!need_resched())
- __asm__(
- " .set push \n"
- " .set mips3 \n"
- " .set noat \n"
- " mfc0 $1, $12 \n"
- " sync \n"
- " mtc0 $1, $12 # stalls until W stage \n"
- " wait \n"
- " mtc0 $1, $12 # stalls until W stage \n"
- " .set pop \n");
- local_irq_enable();
-}
-
-/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
- */
-static void au1k_wait(void)
-{
- __asm__(" .set mips3 \n"
- " cache 0x14, 0(%0) \n"
- " cache 0x14, 32(%0) \n"
- " sync \n"
- " nop \n"
- " wait \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " .set mips0 \n"
- : : "r" (au1k_wait));
-}
-
-static int __initdata nowait;
-
-static int __init wait_disable(char *s)
-{
- nowait = 1;
-
- return 1;
-}
-
-__setup("nowait", wait_disable);
-
static int __cpuinitdata mips_fpu_disabled;
static int __init fpu_disable(char *s)
__setup("nodsp", dsp_disable);
-void __init check_wait(void)
-{
-	struct cpuinfo_mips *c = &current_cpu_data;
-
- if (nowait) {
- printk("Wait instruction disabled.\n");
- return;
- }
-
- switch (c->cputype) {
- case CPU_R3081:
- case CPU_R3081E:
- cpu_wait = r3081_wait;
- break;
- case CPU_TX3927:
- cpu_wait = r39xx_wait;
- break;
- case CPU_R4200:
-/* case CPU_R4300: */
- case CPU_R4600:
- case CPU_R4640:
- case CPU_R4650:
- case CPU_R4700:
- case CPU_R5000:
- case CPU_R5500:
- case CPU_NEVADA:
- case CPU_4KC:
- case CPU_4KEC:
- case CPU_4KSC:
- case CPU_5KC:
- case CPU_25KF:
- case CPU_PR4450:
- case CPU_BMIPS3300:
- case CPU_BMIPS4350:
- case CPU_BMIPS4380:
- case CPU_BMIPS5000:
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- case CPU_JZRISC:
- case CPU_LOONGSON1:
- case CPU_XLR:
- case CPU_XLP:
- cpu_wait = r4k_wait;
- break;
-
- case CPU_RM7000:
- cpu_wait = rm7k_wait_irqoff;
- break;
-
- case CPU_M14KC:
- case CPU_M14KEC:
- case CPU_24K:
- case CPU_34K:
- case CPU_1004K:
- cpu_wait = r4k_wait;
- if (read_c0_config7() & MIPS_CONF7_WII)
- cpu_wait = r4k_wait_irqoff;
- break;
-
- case CPU_74K:
- cpu_wait = r4k_wait;
- if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
- cpu_wait = r4k_wait_irqoff;
- break;
-
- case CPU_TX49XX:
- cpu_wait = r4k_wait_irqoff;
- break;
- case CPU_ALCHEMY:
- cpu_wait = au1k_wait;
- break;
- case CPU_20KC:
- /*
- * WAIT on Rev1.0 has E1, E2, E3 and E16.
- * WAIT on Rev2.0 and Rev3.0 has E16.
- * Rev3.1 WAIT is nop, why bother
- */
- if ((c->processor_id & 0xff) <= 0x64)
- break;
-
- /*
-		 * Another rev is incrementing c0_count at a reduced clock
- * rate while in WAIT mode. So we basically have the choice
- * between using the cp0 timer as clocksource or avoiding
- * the WAIT instruction. Until more details are known,
- * disable the use of WAIT for 20Kc entirely.
- cpu_wait = r4k_wait;
- */
- break;
- case CPU_RM9000:
- if ((c->processor_id & 0x00ff) >= 0x40)
- cpu_wait = r4k_wait;
- break;
- default:
- break;
- }
-}
-
static inline void check_errata(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
+#include <linux/slab.h>
static int __init parse_savemaxmem(char *p)
{
__FINIT
.align 5 /* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
.set push
.set noreorder
/* start of rollback region */
jr ra
nop
.set pop
- END(r4k_wait)
+ END(__r4k_wait)
.macro BUILD_ROLLBACK_PROLOGUE handler
FEXPORT(rollback_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
- PTR_LA k1, r4k_wait
+ PTR_LA k1, __r4k_wait
ori k0, 0x1f /* 32 byte rollback region */
xori k0, 0x1f
bne k0, k1, 9f
.set noreorder
/* check if TLB contains a entry for EPC */
MFC0 k1, CP0_ENTRYHI
- andi k1, 0xff /* ASID_MASK patched at run-time!! */
+ andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
PTR_SRL k0, _PAGE_SHIFT + 1
PTR_SLL k0, _PAGE_SHIFT + 1
--- /dev/null
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
+ * the implementation of the "wait" feature differs between CPU families. This
+ * points to the function that implements CPU specific wait.
+ * The wait instruction stops the pipeline and reduces the power consumption of
+ * the CPU very much.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+ unsigned long cfg = read_c0_conf();
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+ local_irq_enable();
+ __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set mips3 \n"
+ " wait \n"
+ " .set pop \n");
+ local_irq_enable();
+ __asm__(
+ " .globl __pastwait \n"
+ "__pastwait: \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set mips3 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ local_irq_enable();
+}
+
+/*
+ * The Au1xxx wait is available only if using 32khz counter or
+ * external timer source, but specifically not CP0 Counter.
+ * alchemy/common/time.c may override cpu_wait!
+ */
+static void au1k_wait(void)
+{
+ __asm__(
+ " .set mips3 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " nop \n"
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set mips0 \n"
+ : : "r" (au1k_wait));
+ local_irq_enable();
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (nowait) {
+ printk("Wait instruction disabled.\n");
+ return;
+ }
+
+ switch (c->cputype) {
+ case CPU_R3081:
+ case CPU_R3081E:
+ cpu_wait = r3081_wait;
+ break;
+ case CPU_TX3927:
+ cpu_wait = r39xx_wait;
+ break;
+ case CPU_R4200:
+/* case CPU_R4300: */
+ case CPU_R4600:
+ case CPU_R4640:
+ case CPU_R4650:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5500:
+ case CPU_NEVADA:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_4KSC:
+ case CPU_5KC:
+ case CPU_25KF:
+ case CPU_PR4450:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_JZRISC:
+ case CPU_LOONGSON1:
+ case CPU_XLR:
+ case CPU_XLP:
+ cpu_wait = r4k_wait;
+ break;
+
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ cpu_wait = r4k_wait;
+ if (read_c0_config7() & MIPS_CONF7_WII)
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_74K:
+ cpu_wait = r4k_wait;
+ if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_ALCHEMY:
+ cpu_wait = au1k_wait;
+ break;
+ case CPU_20KC:
+ /*
+ * WAIT on Rev1.0 has E1, E2, E3 and E16.
+ * WAIT on Rev2.0 and Rev3.0 has E16.
+ * Rev3.1 WAIT is nop, why bother
+ */
+ if ((c->processor_id & 0xff) <= 0x64)
+ break;
+
+ /*
+		 * Another rev is incrementing c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
+ break;
+ case CPU_RM9000:
+ if ((c->processor_id & 0x00ff) >= 0x40)
+ cpu_wait = r4k_wait;
+ break;
+ default:
+ break;
+ }
+}
+
+static void smtc_idle_hook(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ void smtc_idle_loop_hook(void);
+
+ smtc_idle_loop_hook();
+#endif
+}
+
+void arch_cpu_idle(void)
+{
+ smtc_idle_hook();
+ if (cpu_wait)
+ cpu_wait();
+ else
+ local_irq_enable();
+}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- free_insn_slot(p->ainsn.insn, 0);
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
}
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/prom.h>
}
#endif
-void arch_cpu_idle(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- extern void smtc_idle_loop_hook(void);
-
- smtc_idle_loop_hook();
-#endif
- if (cpu_wait)
- (*cpu_wait)();
- else
- local_irq_enable();
-}
-
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int pc_offset;
};
+#define J_TARGET(pc,target) \
+ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
#endif
}
-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
return 0;
return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
+ if (ip->j_format.opcode == j_op)
+ return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
for (i = 0; i < max_insns; i++, ip++) {
- if (is_jal_jalr_jr_ins(ip))
+ if (is_jump_ins(ip))
break;
if (!info->frame_size) {
if (is_sp_move_ins(ip))
static struct mips_frame_info schedule_mfi __read_mostly;
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+ return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+ union mips_instruction *ip = (void *)schedule;
+ int max_insns = 8;
+ int i;
+
+ for (i = 0; i < max_insns; i++, ip++) {
+ if (ip->j_format.opcode == j_op)
+ return J_TARGET(ip, ip->j_format.target);
+ }
+ return 0;
+}
+#endif
+
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
+#endif
+ unsigned long addr;
+
+ addr = get___schedule_addr();
+ if (!addr)
+ addr = (unsigned long)schedule;
- kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+#ifdef CONFIG_KALLSYMS
+ kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
- schedule_mfi.func = schedule;
+ schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
#include <asm/processor.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
+#include <asm/setup.h>
static struct rtlx_info *rtlx;
static int major;
PTR sys_process_vm_writev /* 5305 */
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_getdents64
.size sys_call_table,.-sys_call_table
#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
+#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
+#include <asm/idle.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
static int ipibuffers;
static int nostlb;
static int asidmask;
-unsigned int smtc_asid_mask = 0xff;
+unsigned long smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
{
unsigned long flags;
int mtflags;
unsigned long tcrestart;
- extern void r4k_wait_irqoff(void), __pastwait(void);
int set_resched_flag = (type == LINUX_SMP_IPI &&
action == SMP_RESCHEDULE_YOURSELF);
*/
if (cpu_wait == r4k_wait_irqoff) {
tcrestart = read_tc_c0_tcrestart();
- if (tcrestart >= (unsigned long)r4k_wait_irqoff
- && tcrestart < (unsigned long)__pastwait) {
+ if (address_is_in_r4k_wait_irqoff(tcrestart)) {
write_tc_c0_tcrestart(__pastwait);
tcstatus &= ~TCSTATUS_IXMT;
write_tc_c0_tcstatus(tcstatus);
asid = asid_cache(cpu);
do {
- if (!ASID_MASK(ASID_INC(asid))) {
+ if (!((asid += ASID_INC) & ASID_MASK) ) {
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
mips_ihb();
}
tcstat = read_tc_c0_tcstatus();
- smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
+ smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
if (!prevhalt)
write_tc_c0_tchalt(0);
}
asid = ASID_FIRST_VERSION;
local_flush_tlb_all(); /* start new asid cycle */
}
- } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
+ } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
tlb_read();
ehb();
ehi = read_c0_entryhi();
- if (ASID_MASK(ehi) == asid) {
+ if ((ehi & ASID_MASK) == asid) {
/*
* Invalidate only entries with specified ASID,
		     * making sure all entries differ.
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
+#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/uasm.h>
extern void check_wait(void);
-extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
asmlinkage void do_tr(struct pt_regs *regs)
{
- unsigned int opcode, tcode = 0;
+ u32 opcode, tcode = 0;
u16 instr[2];
- unsigned long epc = exception_epc(regs);
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
- if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
- (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+ __get_user(instr[1], (u16 __user *)(epc + 2)))
goto out_sigsegv;
- opcode = (instr[0] << 16) | instr[1];
-
- /* Immediate versions don't provide a code. */
- if (!(opcode & OPCODE)) {
- if (get_isa16_mode(regs->cp0_epc))
- /* microMIPS */
- tcode = (opcode >> 12) & 0x1f;
- else
- tcode = ((opcode >> 6) & ((1 << 10) - 1));
+ opcode = (instr[0] << 16) | instr[1];
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 12) & ((1 << 4) - 1);
+ } else {
+ if (__get_user(opcode, (u32 __user *)epc))
+ goto out_sigsegv;
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 6) & ((1 << 10) - 1);
}
do_trap_or_bp(regs, tcode, "Trap");
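
Illustrative sketch (not part of the patch): in the classic (non-microMIPS)
path above, register-form trap instructions such as teq keep their 10-bit code
in bits 15..6, which is what the (opcode >> 6) & ((1 << 10) - 1) extraction
recovers; the hand-assembled instruction word below is an assumption made for
the example only.

/* Stand-alone check of the classic trap-code extraction: a hand-assembled
 * 'teq $2, $3, 5' (SPECIAL opcode, function 0x34, code field = 5). */
#include <assert.h>

int main(void)
{
	unsigned int opcode = (0u << 26)	/* SPECIAL primary opcode */
			    | (2u << 21)	/* rs = $2                */
			    | (3u << 16)	/* rt = $3                */
			    | (5u << 6)		/* 10-bit trap code       */
			    | 0x34;		/* TEQ function field     */
	unsigned int tcode = (opcode >> 6) & ((1 << 10) - 1);

	assert(tcode == 5);
	return 0;
}
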
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
- char *vec_start = (cpu_wait == r4k_wait) ?
+ char *vec_start = using_rollback_handler() ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
- unsigned long asid = 0;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
- asid = ASID_FIRST_VERSION;
- cpu_data[cpu].asid_cache = asid;
- TLBMISS_HANDLER_SETUP();
+ if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
extern char except_vec4;
extern char except_vec3_r4000;
unsigned long i;
- int rollback;
check_wait();
- rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
if (board_be_init)
board_be_init();
- set_except_vector(0, rollback ? rollback_handle_int : handle_int);
+ set_except_vector(0, using_rollback_handler() ? rollback_handle_int
+ : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
long
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
int
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ return -ENOIOCTLCMD;
+}
+
+#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
+#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
+#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
+#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
+#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
+#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
+#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
+#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
+#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
+#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
+#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
+#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
+#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
+
+static u64 kvm_mips_get_one_regs[] = {
+ KVM_REG_MIPS_R0,
+ KVM_REG_MIPS_R1,
+ KVM_REG_MIPS_R2,
+ KVM_REG_MIPS_R3,
+ KVM_REG_MIPS_R4,
+ KVM_REG_MIPS_R5,
+ KVM_REG_MIPS_R6,
+ KVM_REG_MIPS_R7,
+ KVM_REG_MIPS_R8,
+ KVM_REG_MIPS_R9,
+ KVM_REG_MIPS_R10,
+ KVM_REG_MIPS_R11,
+ KVM_REG_MIPS_R12,
+ KVM_REG_MIPS_R13,
+ KVM_REG_MIPS_R14,
+ KVM_REG_MIPS_R15,
+ KVM_REG_MIPS_R16,
+ KVM_REG_MIPS_R17,
+ KVM_REG_MIPS_R18,
+ KVM_REG_MIPS_R19,
+ KVM_REG_MIPS_R20,
+ KVM_REG_MIPS_R21,
+ KVM_REG_MIPS_R22,
+ KVM_REG_MIPS_R23,
+ KVM_REG_MIPS_R24,
+ KVM_REG_MIPS_R25,
+ KVM_REG_MIPS_R26,
+ KVM_REG_MIPS_R27,
+ KVM_REG_MIPS_R28,
+ KVM_REG_MIPS_R29,
+ KVM_REG_MIPS_R30,
+ KVM_REG_MIPS_R31,
+
+ KVM_REG_MIPS_HI,
+ KVM_REG_MIPS_LO,
+ KVM_REG_MIPS_PC,
+
+ KVM_REG_MIPS_CP0_INDEX,
+ KVM_REG_MIPS_CP0_CONTEXT,
+ KVM_REG_MIPS_CP0_PAGEMASK,
+ KVM_REG_MIPS_CP0_WIRED,
+ KVM_REG_MIPS_CP0_BADVADDR,
+ KVM_REG_MIPS_CP0_ENTRYHI,
+ KVM_REG_MIPS_CP0_STATUS,
+ KVM_REG_MIPS_CP0_CAUSE,
+ /* EPC set via kvm_regs, et al. */
+ KVM_REG_MIPS_CP0_CONFIG,
+ KVM_REG_MIPS_CP0_CONFIG1,
+ KVM_REG_MIPS_CP0_CONFIG2,
+ KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG7,
+ KVM_REG_MIPS_CP0_ERROREPC
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ u64 __user *uaddr = (u64 __user *)(long)reg->addr;
+
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ s64 v;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+ v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+ break;
+ case KVM_REG_MIPS_HI:
+ v = (long)vcpu->arch.hi;
+ break;
+ case KVM_REG_MIPS_LO:
+ v = (long)vcpu->arch.lo;
+ break;
+ case KVM_REG_MIPS_PC:
+ v = (long)vcpu->arch.pc;
+ break;
+
+ case KVM_REG_MIPS_CP0_INDEX:
+ v = (long)kvm_read_c0_guest_index(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ v = (long)kvm_read_c0_guest_context(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ v = (long)kvm_read_c0_guest_pagemask(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ v = (long)kvm_read_c0_guest_wired(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ v = (long)kvm_read_c0_guest_badvaddr(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ v = (long)kvm_read_c0_guest_entryhi(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ v = (long)kvm_read_c0_guest_status(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ v = (long)kvm_read_c0_guest_cause(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ v = (long)kvm_read_c0_guest_errorepc(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ v = (long)kvm_read_c0_guest_config(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ v = (long)kvm_read_c0_guest_config1(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ v = (long)kvm_read_c0_guest_config2(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ v = (long)kvm_read_c0_guest_config3(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG7:
+ v = (long)kvm_read_c0_guest_config7(cop0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return put_user(v, uaddr);
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ u64 __user *uaddr = (u64 __user *)(long)reg->addr;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u64 v;
+
+ if (get_user(v, uaddr) != 0)
+ return -EFAULT;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_R0:
+ /* Silently ignore requests to set $0 */
+ break;
+ case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+ vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+ break;
+ case KVM_REG_MIPS_HI:
+ vcpu->arch.hi = v;
+ break;
+ case KVM_REG_MIPS_LO:
+ vcpu->arch.lo = v;
+ break;
+ case KVM_REG_MIPS_PC:
+ vcpu->arch.pc = v;
+ break;
+
+ case KVM_REG_MIPS_CP0_INDEX:
+ kvm_write_c0_guest_index(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ kvm_write_c0_guest_context(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ kvm_write_c0_guest_pagemask(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ kvm_write_c0_guest_wired(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ kvm_write_c0_guest_badvaddr(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ kvm_write_c0_guest_entryhi(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ kvm_write_c0_guest_status(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ kvm_write_c0_guest_cause(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ kvm_write_c0_guest_errorepc(cop0, v);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
long
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
- int intr;
switch (ioctl) {
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+ if (ioctl == KVM_SET_ONE_REG)
+ return kvm_mips_set_reg(vcpu, &reg);
+ else
+ return kvm_mips_get_reg(vcpu, &reg);
+ }
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ u64 __user *reg_dest;
+ struct kvm_reg_list reg_list;
+ unsigned n;
+
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ return -EFAULT;
+ n = reg_list.n;
+ reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ return -EFAULT;
+ if (n < reg_list.n)
+ return -E2BIG;
+ reg_dest = user_list->reg;
+ if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+ sizeof(kvm_mips_get_one_regs)))
+ return -EFAULT;
+ return 0;
+ }
case KVM_NMI:
/* Treat the NMI as a CPU reset */
r = kvm_mips_reset_vcpu(vcpu);
if (copy_from_user(&irq, argp, sizeof(irq)))
goto out;
- intr = (int)irq.irq;
-
kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
irq.irq);
break;
}
default:
- r = -EINVAL;
+ r = -ENOIOCTLCMD;
}
out:
switch (ioctl) {
default:
- r = -EINVAL;
+ r = -ENOIOCTLCMD;
}
return r;
int
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- return -ENOTSUPP;
+ return -ENOIOCTLCMD;
}
int
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- return -ENOTSUPP;
+ return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
int r;
switch (ext) {
+ case KVM_CAP_ONE_REG:
+ r = 1;
+ break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
break;
}
return r;
-
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
int i;
- for (i = 0; i < 32; i++)
- vcpu->arch.gprs[i] = regs->gprs[i];
-
+ for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+ vcpu->arch.gprs[i] = regs->gpr[i];
+ vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
vcpu->arch.hi = regs->hi;
vcpu->arch.lo = regs->lo;
vcpu->arch.pc = regs->pc;
- return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+ return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
- for (i = 0; i < 32; i++)
- regs->gprs[i] = vcpu->arch.gprs[i];
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+ regs->gpr[i] = vcpu->arch.gprs[i];
regs->hi = vcpu->arch.hi;
regs->lo = vcpu->arch.lo;
regs->pc = vcpu->arch.pc;
- return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+ return 0;
}
void kvm_mips_comparecount_func(unsigned long data)
printk("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+ uint32_t nasid =
+ vcpu->arch.gprs[rt] & ASID_MASK;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
&&
- (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
- != nasid)) {
+ ((kvm_read_c0_guest_entryhi(cop0) &
+ ASID_MASK) != nasid)) {
kvm_debug
("MTCz, change ASID from %#lx to %#lx\n",
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
- ASID_MASK(vcpu->arch.gprs[rt]));
+ kvm_read_c0_guest_entryhi(cop0) &
+ ASID_MASK,
+ vcpu->arch.gprs[rt] & ASID_MASK);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
* resulting handler will do the right thing
*/
index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+ (kvm_read_c0_guest_entryhi
+ (cop0) & ASID_MASK));
if (index < 0) {
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
enum emulation_result er = EMULATE_DONE;
unsigned long entryhi =
(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
*/
index = kvm_mips_guest_tlb_lookup(vcpu,
(va & VPN2_MASK) |
- ASID_MASK(kvm_read_c0_guest_entryhi
- (vcpu->arch.cop0)));
+ (kvm_read_c0_guest_entryhi
+ (vcpu->arch.cop0) & ASID_MASK));
if (index < 0) {
if (exccode == T_TLB_LD_MISS) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
#include <asm/cpu.h>
#include <asm/bootinfo.h>
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
- return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+ return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
- return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+ return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
old_pagemask = read_c0_pagemask();
printk("HOST TLBs:\n");
- printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+ printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
for (i = 0; i < current_cpu_data.tlbsize; i++) {
write_c0_index(i);
}
}
-static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
+ int srcu_idx, err = 0;
pfn_t pfn;
if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
- return;
+ return 0;
+ srcu_idx = srcu_read_lock(&kvm->srcu);
pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
if (kvm_mips_is_error_pfn(pfn)) {
- panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ err = -EFAULT;
+ goto out;
}
kvm->arch.guest_pmap[gfn] = pfn;
- return;
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return err;
}
/* Translate guest KSEG0 addresses to Host PA */
gva);
return KVM_INVALID_PAGE;
}
- kvm_mips_map_page(vcpu->kvm, gfn);
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return KVM_INVALID_ADDR;
+
return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
even = !(gfn & 0x1);
vaddr = badvaddr & (PAGE_MASK << 1);
- kvm_mips_map_page(vcpu->kvm, gfn);
- kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+ return -1;
if (even) {
pfn0 = kvm->arch.guest_pmap[gfn];
pfn0 = 0;
pfn1 = 0;
} else {
- kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
- kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+ return -1;
pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
- (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+ (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
index = i;
break;
}
{
unsigned long asid = asid_cache(cpu);
- if (!(ASID_MASK(ASID_INC(asid)))) {
+ if (!((asid += ASID_INC) & ASID_MASK)) {
if (cpu_has_vtag_icache) {
flush_icache_all();
}
if (!newasid) {
/* If we preempted while the guest was executing, then reload the pre-empted ASID */
if (current->flags & PF_VCPU) {
- write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+ write_c0_entryhi(vcpu->arch.
+ preempt_entryhi & ASID_MASK);
ehb();
}
} else {
*/
if (current->flags & PF_VCPU) {
if (KVM_GUEST_KERNEL_MODE(vcpu))
- write_c0_entryhi(ASID_MASK(vcpu->arch.
- guest_kernel_asid[cpu]));
+ write_c0_entryhi(vcpu->arch.
+ guest_kernel_asid[cpu] &
+ ASID_MASK);
else
- write_c0_entryhi(ASID_MASK(vcpu->arch.
- guest_user_asid[cpu]));
+ write_c0_entryhi(vcpu->arch.
+ guest_user_asid[cpu] &
+ ASID_MASK);
ehb();
}
}
kvm_mips_guest_tlb_lookup(vcpu,
((unsigned long) opc & VPN2_MASK)
|
- ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+ (kvm_read_c0_guest_entryhi
+ (cop0) & ASID_MASK));
if (index < 0) {
kvm_err
("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
return ret;
}
-static int
-kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
- kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
- kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
- kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
- kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
-
- kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
- kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
- kvm_write_c0_guest_pagemask(cop0,
- regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
- kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
- kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
-
- return 0;
-}
-
-static int
-kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
- regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
- regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
- regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
- regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
-
- regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
- regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
- regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
- kvm_read_c0_guest_pagemask(cop0);
- regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
- regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
-
- regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
- regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
- regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
- regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
- regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
-
- return 0;
-}
-
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
.irq_deliver = kvm_mips_irq_deliver_cb,
.irq_clear = kvm_mips_irq_clear_cb,
- .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
- .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get resource\n");
- return -ENOMEM;
- }
/* remap gptu register range */
gptu_membase = devm_ioremap_resource(&pdev->dev, res);
if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
dev_err(&pdev->dev, "Failed to find magic\n");
gptu_hwexit();
+ clk_disable(clk);
+ clk_put(clk);
return -ENAVAIL;
}
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
-#include <asm/mmu_context.h>
static inline const char *msk2str(unsigned int mask)
{
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
- asid = ASID_MASK(s_entryhi);
+ asid = s_entryhi & 0xff;
for (i = first; i <= last; i++) {
write_c0_index(i);
printk("va=%0*lx asid=%02lx\n",
width, (entryhi & ~0x1fffUL),
- ASID_MASK(entryhi));
+ entryhi & 0xff);
printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
width,
(entrylo0 << 6) & PAGE_MASK, c0,
#include <linux/mm.h>
#include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
unsigned int asid;
unsigned long entryhi, entrylo0;
- asid = ASID_MASK(read_c0_entryhi());
+ asid = read_c0_entryhi() & 0xfc0;
for (i = first; i <= last; i++) {
write_c0_index(i<<8);
/* Unused entries have a virtual address of KSEG0. */
if ((entryhi & 0xffffe000) != 0x80000000
- && (ASID_MASK(entryhi) == asid)) {
+ && (entryhi & 0xfc0) == asid) {
/*
* Only print entries in use
*/
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
(entryhi & 0xffffe000),
- ASID_MASK(entryhi),
+ entryhi & 0xfc0,
entrylo0 & PAGE_MASK,
(entrylo0 & (1 << 11)) ? 1 : 0,
(entrylo0 & (1 << 10)) ? 1 : 0,
#include <linux/init.h>
#include <linux/pm.h>
+#include <asm/idle.h>
#include <asm/reboot.h>
#include <loongson.h>
#include <linux/io.h>
#include <linux/pm.h>
+#include <asm/idle.h>
#include <asm/reboot.h>
#include <loongson1.h>
#endif
local_irq_save(flags);
- old_ctx = ASID_MASK(read_c0_entryhi());
+ old_ctx = read_c0_entryhi() & ASID_MASK;
write_c0_entrylo0(0);
entry = r3k_have_wired_reg ? read_c0_wired() : 8;
for (; entry < current_cpu_data.tlbsize; entry++) {
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
- ASID_MASK(cpu_context(cpu, mm)), start, end);
+ cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
- int oldpid = ASID_MASK(read_c0_entryhi());
- int newpid = ASID_MASK(cpu_context(cpu, mm));
+ int oldpid = read_c0_entryhi() & ASID_MASK;
+ int newpid = cpu_context(cpu, mm) & ASID_MASK;
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
#ifdef DEBUG_TLB
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
- newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
+ newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
page &= PAGE_MASK;
local_irq_save(flags);
- oldpid = ASID_MASK(read_c0_entryhi());
+ oldpid = read_c0_entryhi() & ASID_MASK;
write_c0_entryhi(page | newpid);
BARRIER;
tlb_probe();
if (current->active_mm != vma->vm_mm)
return;
- pid = ASID_MASK(read_c0_entryhi());
+ pid = read_c0_entryhi() & ASID_MASK;
#ifdef DEBUG_TLB
- if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
(cpu_context(cpu, vma->vm_mm)), pid);
}
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
- old_ctx = ASID_MASK(read_c0_entryhi());
+ old_ctx = read_c0_entryhi() & ASID_MASK;
old_pagemask = read_c0_pagemask();
w = read_c0_wired();
write_c0_wired(w + 1);
#endif
local_irq_save(flags);
- old_ctx = ASID_MASK(read_c0_entryhi());
+ old_ctx = read_c0_entryhi() & ASID_MASK;
write_c0_entrylo0(entrylo0);
write_c0_entryhi(entryhi);
write_c0_index(wired);
ENTER_CRITICAL(flags);
- pid = ASID_MASK(read_c0_entryhi());
+ pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
if (current->active_mm != vma->vm_mm)
return;
- pid = ASID_MASK(read_c0_entryhi());
+ pid = read_c0_entryhi() & ASID_MASK;
local_irq_save(flags);
address &= PAGE_MASK;
#include <linux/init.h>
#include <linux/cache.h>
-#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
-#ifdef CONFIG_64BIT
-static int check_for_high_segbits __cpuinitdata;
-#endif
-
-static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
- unsigned int i_const)
-{
- unsigned int **p;
-
- for (p = start; p < stop; p++) {
-#ifndef CONFIG_CPU_MICROMIPS
- unsigned int *ip;
-
- ip = *p;
- *ip = (*ip & 0xffff0000) | i_const;
-#else
- unsigned short *ip;
-
- ip = ((unsigned short *)((unsigned int)*p - 1));
- if ((*ip & 0xf000) == 0x4000) {
- *ip &= 0xfff1;
- *ip |= (i_const << 1);
- } else if ((*ip & 0xf000) == 0x6000) {
- *ip &= 0xfff1;
- *ip |= ((i_const >> 2) << 1);
- } else {
- ip++;
- *ip = i_const;
- }
-#endif
- local_flush_icache_range((unsigned long)ip,
- (unsigned long)ip + sizeof(*ip));
- }
-}
-
-#define asid_insn_fixup(section, const) \
-do { \
- extern unsigned int *__start_ ## section; \
- extern unsigned int *__stop_ ## section; \
- insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
-} while(0)
-
-/*
- * Caller is assumed to flush the caches before the first context switch.
- */
-static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
- unsigned int version_mask,
- unsigned int first_version)
-{
- extern asmlinkage void handle_ri_rdhwr_vivt(void);
- unsigned long *vivt_exc;
-
-#ifdef CONFIG_CPU_MICROMIPS
- /*
- * Worst case optimised microMIPS addiu instructions support
- * only a 3-bit immediate value.
- */
- if(inc > 7)
- panic("Invalid ASID increment value!");
-#endif
- asid_insn_fixup(__asid_inc, inc);
- asid_insn_fixup(__asid_mask, mask);
- asid_insn_fixup(__asid_version_mask, version_mask);
- asid_insn_fixup(__asid_first_version, first_version);
-
- /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
- vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
-#ifdef CONFIG_CPU_MICROMIPS
- vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
-#endif
- vivt_exc++;
- *vivt_exc = (*vivt_exc & ~mask) | mask;
-
- current_cpu_data.asid_cache = first_version;
-}
-
static int check_for_high_segbits __cpuinitdata;
static unsigned int kscratch_used_mask __cpuinitdata;
case CPU_TX3922:
case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
- setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
if (cpu_has_local_ebase)
build_r3000_tlb_refill_handler();
if (!run_once) {
break;
default:
-#ifndef CONFIG_MIPS_MT_SMTC
- setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
-#else
- setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
-#endif
if (!run_once) {
scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
#include <linux/pm.h>
#include <linux/bootmem.h>
+#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
#include <linux/serial_8250.h>
#include <linux/pm.h>
+#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
return 0; /* foo */
}
-static inline int str2eaddr(unsigned char *ea, unsigned char *str)
+int str2eaddr(unsigned char *ea, unsigned char *str)
{
int index = 0;
unsigned char num = 0;
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
+#include <asm/idle.h>
#include <asm/r4kcache.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
reg-shift = <2>;
};
};
+
+ usb@101c0000 {
+ compatible = "ralink,rt3050-usb", "snps,dwc2";
+ reg = <0x101c0000 40000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+
+ status = "disabled";
+ };
};
reg = <0x50000 0x7b0000>;
};
};
+
+ usb@101c0000 {
+ status = "ok";
+ };
};
__dt_setup_arch(&__dtb_start);
if (soc_info.mem_size)
- add_memory_region(soc_info.mem_base, soc_info.mem_size,
+ add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
BOOT_MEM_RAM);
else
detect_memory_region(soc_info.mem_base,
#include <linux/slab.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
+#include <asm/idle.h>
#include <asm/time.h>
#include <asm/reboot.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
+#include <asm/idle.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
+#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
return channel ? 15 : 14;
}
+#include <asm-generic/pci_iomap.h>
+
#endif /* _ASM_PCI_H */
mov (REG_D0,fp),d0
mov (REG_A0,fp),a0
calls (a0)
+ GET_THREAD_INFO a2 # A2 must be set on return from sys_exit()
clr d0
mov d0,(REG_D0,fp)
jmp syscall_exit
and EPSW_nSL,d0
beq resume_kernel # returning to supervisor mode
- btst _TIF_SYSCALL_TRACE,d2
- beq work_pending
LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
+ btst _TIF_SYSCALL_TRACE,d2
+ beq work_pending
mov fp,d0
call syscall_trace_exit[],0 # do_syscall_trace(regs)
jmp resume_userspace
work_resched:
call schedule[],0
+resume_userspace:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_WORK_MASK,d2
beq restore_all
+
+ LOCAL_IRQ_ENABLE
btst _TIF_NEED_RESCHED,d2
bne work_resched
and EPSW_nSL,d0
beq resume_kernel # returning to supervisor mode
-ENTRY(resume_userspace)
- # make sure we don't miss an interrupt setting need_resched or
- # sigpending between sampling and the rti
- LOCAL_IRQ_DISABLE
-
- # is there any work to be done on int/exception return?
- mov (TI_flags,a2),d2
- btst _TIF_WORK_MASK,d2
- bne work_pending
- jmp restore_all
-
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
LOCAL_IRQ_DISABLE
#include <linux/delay.h>
#include <linux/irq.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include "pci-asb2305.h"
unsigned int pci_probe = 1;
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
- default n
+ default y
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-MACHINE := $(shell uname -m)
-NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0)
-
ifdef CONFIG_64BIT
UTS_MACHINE := parisc64
CHECKFLAGS += -D__LP64__=1 -m64
-WIDTH := 64
+CC_ARCHES = hppa64
else # 32-bit
-WIDTH :=
+CC_ARCHES = hppa hppa2.0 hppa1.1
endif
-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE := hppa$(WIDTH)-linux-
-else
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
- endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+ ifeq ($(CROSS_COMPILE),)
+ CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+ CROSS_COMPILE := $(call cc-cross-prefix, \
+ $(foreach a,$(CC_ARCHES), \
+ $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+ endif
endif
OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
endif
# select which processor to optimise for
-cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC
cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300
SAVE_SP (%sr4, PT_SR4 (\regs))
SAVE_SP (%sr5, PT_SR5 (\regs))
SAVE_SP (%sr6, PT_SR6 (\regs))
- SAVE_SP (%sr7, PT_SR7 (\regs))
SAVE_CR (%cr17, PT_IASQ0(\regs))
mtctl %r0, %cr17
#include <linux/threads.h>
#include <linux/irq.h>
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
typedef struct {
unsigned int __softirq_pending;
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
unsigned int kernel_stack_usage;
-#endif
+ unsigned int irq_stack_usage;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
#endif
+ unsigned int irq_unaligned_count;
+ unsigned int irq_fpassist_count;
unsigned int irq_tlb_count;
} ____cacheline_aligned irq_cpustat_t;
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING
static inline int pfn_to_nid(unsigned long pfn)
{
unsigned int i;
- unsigned char r;
if (unlikely(pfn_is_io(pfn)))
return 0;
i = pfn >> PFNNID_SHIFT;
BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
- r = pfnnid_map[i];
- BUG_ON(r == 0xff);
- return (int)r;
+ return (int)pfnnid_map[i];
}
static inline int pfn_valid(int pfn)
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/percpu.h>
-
#endif /* __ASSEMBLY__ */
/*
#ifndef __ASSEMBLY__
-/*
- * IRQ STACK - used for irq handler
- */
-#ifdef __KERNEL__
-
-#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
-
-union irq_stack_union {
- unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
-};
-
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
-
-void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
-
-#endif /* __KERNEL__ */
-
/*
* Data detected about CPUs at boot time which is the same for all CPU's.
* HP boxes are SMP - ie identical processors.
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
- char name[20];
+ char name[28];
char *output = name;
int i;
rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
mtsp %r0, %sr4
mtsp %r0, %sr5
- mfsp %sr7, %r1
- or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
- mtsp %r1, %sr3
+ mtsp %r0, %sr6
tovirt_r1 %r29
load32 KERNEL_PSW, %r1
rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
- mtsp %r0, %sr6
- mtsp %r0, %sr7
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %ipsw
/* we save the registers in the task struct */
+ copy %r30, %r17
mfctl %cr30, %r1
+ ldo THREAD_SZ_ALGN(%r1), %r30
+ mtsp %r0,%sr7
+ mtsp %r16,%sr3
tophys %r1,%r9
LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
tophys %r1,%r9
ldo TASK_REGS(%r9),%r9
- STREG %r30, PT_GR30(%r9)
+ STREG %r17,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
+ STREG %r16,PT_SR7(%r9)
copy %r9,%r29
- mfctl %cr30, %r1
- ldo THREAD_SZ_ALGN(%r1), %r30
.endm
.macro get_stack_use_r30
/* we put a struct pt_regs on the stack and save the registers there */
tophys %r30,%r9
- STREG %r30,PT_GR30(%r9)
+ copy %r30,%r1
ldo PT_SZ_ALGN(%r30),%r30
+ STREG %r1,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
+ STREG %r16,PT_SR7(%r9)
copy %r9,%r29
.endm
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
+ /* Acquire the pa_dbit_lock. */
+ .macro dbit_lock spc,tmp,tmp1
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,\spc,2f
+ load32 PA(pa_dbit_lock),\tmp
+1: LDCW 0(\tmp),\tmp1
+ cmpib,COND(=) 0,\tmp1,1b
+ nop
+2:
+#endif
+ .endm
+
+ /* Release the pa_dbit_lock without reloading the lock address. */
+ .macro dbit_unlock0 spc,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+#endif
+ .endm
+
+ /* Release the pa_dbit_lock. */
+ .macro dbit_unlock1 spc,tmp
+#ifdef CONFIG_SMP
+ load32 PA(pa_dbit_lock),\tmp
+ dbit_unlock0 \spc,\tmp
+#endif
+ .endm
+
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
- .macro update_ptep ptep,pte,tmp,tmp1
+ .macro update_ptep spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
+ .macro update_dirty spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptep)
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L3_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
L3_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20w
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
- LDCW 0(t0),t1
- cmpib,COND(=) 0,t1,dbit_spin_20w
- nop
-
-dbit_nolock_20w:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20w
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_11
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_11:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_11
- nop
-
-dbit_nolock_11:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_11
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20
- nop
-
-dbit_nolock_20:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
f_extend pte,t1
idtlbt pte,prot
-
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+ {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
seq_printf(p, "%*s: ", prec, "STK");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
- seq_printf(p, " Kernel stack usage\n");
+ seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+ seq_printf(p, "%*s: ", prec, "IST");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+ seq_puts(p, " Interrupt stack usage\n");
+# endif
#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
- seq_printf(p, " Rescheduling interrupts\n");
+ seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
- seq_printf(p, " Function call interrupts\n");
+ seq_puts(p, " Function call interrupts\n");
#endif
+ seq_printf(p, "%*s: ", prec, "UAH");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+ seq_puts(p, " Unaligned access handler traps\n");
+ seq_printf(p, "%*s: ", prec, "FPA");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+ seq_puts(p, " Floating point assist traps\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
- seq_printf(p, " TLB shootdowns\n");
+ seq_puts(p, " TLB shootdowns\n");
return 0;
}
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+ unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
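+ /* 16-byte area so __ldcw_align() can find an aligned lock word;
+ * non-zero means the irq stack is free, 0 means it is in use. */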
+ volatile unsigned int slock[4];
+ volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+ .slock = { 1,1,1,1 },
+ };
+#endif
+
+
int sysctl_panic_on_stackoverflow = 1;
static inline void stack_overflow_check(struct pt_regs *regs)
unsigned long sp = regs->gr[30];
unsigned long stack_usage;
unsigned int *last_usage;
+ int cpu = smp_processor_id();
/* if sr7 != 0, we interrupted a userspace process which we do not want
* to check for stack overflow. We will only check the kernel stack. */
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
- last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+ if (likely(stack_usage <= THREAD_SIZE))
+ goto check_kernel_stack; /* found kernel stack */
+
+ /* check irq stack usage */
+ stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+ stack_usage = sp - stack_start;
+
+ last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+ if (unlikely(stack_usage > *last_usage))
+ *last_usage = stack_usage;
+
+ if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+ return;
+
+ pr_emerg("stackcheck: %s will most likely overflow irq stack "
+ "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+ current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+ goto panic_check;
+
+check_kernel_stack:
+#endif
+
+ /* check kernel stack usage */
+ last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
#endif
}
#ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
static void execute_on_irq_stack(void *func, unsigned long param1)
{
- unsigned long *irq_stack_start;
+ union irq_stack_union *union_ptr;
unsigned long irq_stack;
- int cpu = smp_processor_id();
+ volatile unsigned int *irq_stack_in_use;
+
+ union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+ irq_stack = (unsigned long) &union_ptr->stack;
+ irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+ 64); /* align for stack frame usage */
- irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
- irq_stack = (unsigned long) irq_stack_start;
- irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+ /* We may be called recursively. If we are already using the irq stack,
+ * just continue to use it. Use spinlocks to serialize
+ * the irq stack usage.
+ */
+ irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+ if (!__ldcw(irq_stack_in_use)) {
+ void (*direct_call)(unsigned long p1) = func;
- BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
- *irq_stack_start = 1;
+ /* We are using the IRQ stack already.
+ * Do direct call on current stack. */
+ direct_call(param1);
+ return;
+ }
/* This is where we switch to the IRQ stack. */
call_on_stack(param1, func, irq_stack);
- *irq_stack_start = 0;
+ /* free up irq stack usage. */
+ *irq_stack_in_use = 1;
+}
+
+asmlinkage void do_softirq(void)
+{
+ __u32 pending;
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = local_softirq_pending();
+
+ if (pending)
+ execute_on_irq_stack(__do_softirq, 0);
+
+ local_irq_restore(flags);
}
#endif /* CONFIG_IRQSTACKS */
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
#endif
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
/* called from hpux boot loader */
boot_command_line[0] = '\0';
} else {
- strcpy(boot_command_line, (char *)__va(boot_args[1]));
+ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+ COMMAND_LINE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
+ __inc_irq_stat(irq_fpassist_count);
handle_fpe(regs);
return;
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
+#include <asm/hardirq.h>
/* #define DEBUG_UNALIGNED 1 */
struct siginfo si;
register int flop=0; /* true if this is a flop */
+ __inc_irq_stat(irq_unaligned_count);
+
/* log a message with pacing */
if (user_mode(regs)) {
if (current->thread.flags & PARISC_UAC_SIGBUS) {
{
int do_recycle;
- inc_irq_stat(irq_tlb_count);
+ __inc_irq_stat(irq_tlb_count);
do_recycle = 0;
spin_lock(&sid_lock);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
#else
void flush_tlb_all(void)
{
- inc_irq_stat(irq_tlb_count);
+ __inc_irq_stat(irq_tlb_count);
spin_lock(&sid_lock);
flush_tlb_all_local(NULL);
recycle_sids();
Select this to enable early debugging for the PowerNV platform
using an "hvsi" console
+config PPC_EARLY_DEBUG_MEMCONS
+ bool "In memory console"
+ help
+ Select this to enable early debugging using an in memory console.
+ This console provides input and output buffers stored within the
+ kernel BSS and should be safe to select on any system. A debugger
+ can then be used to read kernel output or send input to the console.
endchoice
+config PPC_MEMCONS_OUTPUT_SIZE
+ int "In memory console output buffer size"
+ depends on PPC_EARLY_DEBUG_MEMCONS
+ default 4096
+ help
+ Selects the size of the output buffer (in bytes) of the in memory
+ console.
+
+config PPC_MEMCONS_INPUT_SIZE
+ int "In memory console input buffer size"
+ depends on PPC_EARLY_DEBUG_MEMCONS
+ default 128
+ help
+ Selects the size of the input buffer (in bytes) of the in memory
+ console.
+
config PPC_EARLY_DEBUG_OPAL
def_bool y
depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
CONFIG_USB_HIDDEV=y
CONFIG_USB=m
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
--- /dev/null
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl .schedule_user
+#else
+#define SCHEDULE_USER bl .schedule
+#endif
+
+#endif
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
#ifndef __ASSEMBLY__
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+ FW_FEATURE_OPALv3,
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
+#define H_XIRR_X 0x2FC
#define H_RANDOM 0x300
#define H_COP 0x304
#define H_GET_MPP_X 0x314
#endif
#define hard_irq_disable() do { \
+ u8 _was_enabled = get_paca()->soft_enabled; \
__hard_irq_disable(); \
- if (local_paca->soft_enabled) \
- trace_hardirqs_off(); \
get_paca()->soft_enabled = 0; \
get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
+ if (_was_enabled) \
+ trace_hardirqs_off(); \
} while(0)
static inline bool lazy_irq_pending(void)
enum OpalThreadStatus {
OPAL_THREAD_INACTIVE = 0x0,
- OPAL_THREAD_STARTED = 0x1
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
};
enum OpalPciBusCompare {
extern int opal_machine_check(struct pt_regs *regs);
+extern void opal_shutdown(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+
extern void * update_dn_pci_info(struct device_node *dn, void *data);
static inline int pci_device_from_OF_node(struct device_node *np,
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
- return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
+ return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
#define PPC440EP_ERR42
#endif
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch) \
+.machine push ; \
+.machine "power4" ; \
+ lis scratch,0x60000000@h; \
+ dcbt r0,scratch,0b01010; \
+.machine pop
+
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
* keep the address intact to be compatible with code shared with
unsigned long ebbrr;
unsigned long ebbhr;
unsigned long bescr;
+ unsigned long siar;
+ unsigned long sdar;
+ unsigned long sier;
+ unsigned long mmcr0;
+ unsigned long mmcr2;
+ unsigned long mmcra;
#endif
};
#endif
#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- unsigned long sp;
-
if (is_32)
- sp = regs->gpr[1] & 0x0ffffffffUL;
- else
- sp = regs->gpr[1];
-
+ return sp & 0x0ffffffffUL;
return sp;
}
#else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- return regs->gpr[1];
+ return sp;
}
#endif
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), ((e) & _PAGE_COMBO) ? \
+ (e), (pte_val(e) & _PAGE_COMBO) ? \
(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
-/* Reason codes describing kernel causes for transaction aborts. By
- convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
- the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED 0xfe
-#define TM_CAUSE_TLBI 0xfc
-#define TM_CAUSE_FAC_UNAV 0xfa
-#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */
-#define TM_CAUSE_MISC 0xf6
-#define TM_CAUSE_SIGNAL 0xf4
-
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(struct rtas_args *);
struct rtc_time;
#define __ARCH_HAS_SA_RESTORER
#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
#endif /* _ASM_POWERPC_SIGNAL_H */
#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
* Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
*/
+#include <uapi/asm/tm.h>
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void do_load_up_transact_fpu(struct thread_struct *thread);
extern void do_load_up_transact_altivec(struct thread_struct *thread);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
extern void __init udbg_init_ehv_bc(void);
extern void __init udbg_init_ps3gelic(void);
extern void __init udbg_init_debug_opal_raw(void);
header-y += swab.h
header-y += termbits.h
header-y += termios.h
+header-y += tm.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
--- /dev/null
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_RESCHED 0xde
+#define TM_CAUSE_TLBI 0xdc
+#define TM_CAUSE_FAC_UNAV 0xda
+#define TM_CAUSE_SYSCALL 0xd8 /* future use */
+#define TM_CAUSE_MISC 0xd6 /* future use */
+#define TM_CAUSE_SIGNAL 0xd4
+#define TM_CAUSE_ALIGNMENT 0xd2
+#define TM_CAUSE_EMULATE 0xd0
+
+#endif
DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
+ DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
+ DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
+ DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
+ DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
+ DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
+ DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
blr
__init_TLB:
- /* Clear the TLB */
- li r6,128
+ /*
+ * Clear the TLB using the "IS 3" form of tlbiel instruction
+ * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
+ * so we just always do 512
+ */
+ li r6,512
mtctr r6
li r7,0xc00 /* IS field = 0b11 */
ptesync
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
+ .oprofile_cpu_type = 0,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .cpu_user_features = COMMON_USER2_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_cpu_type = 0,
.oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
ret_from_kernel_thread:
REST_NVGPRS(r1)
bl schedule_tail
- li r3,0
- stw r3,0(r1)
mtlr r14
mr r3,r15
PPC440EP_ERR42
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
- andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
/*
* System calls.
_GLOBAL(ret_from_kernel_thread)
bl .schedule_tail
REST_NVGPRS(r1)
- li r3,0
- std r3,0(r1)
ld r14, 0(r14)
mtlr r14
mr r3,r15
std r0, THREAD_EBBHR(r3)
mfspr r0, SPRN_EBBRR
std r0, THREAD_EBBRR(r3)
+
+ /* PMU registers made user read/(write) by EBB */
+ mfspr r0, SPRN_SIAR
+ std r0, THREAD_SIAR(r3)
+ mfspr r0, SPRN_SDAR
+ std r0, THREAD_SDAR(r3)
+ mfspr r0, SPRN_SIER
+ std r0, THREAD_SIER(r3)
+ mfspr r0, SPRN_MMCR0
+ std r0, THREAD_MMCR0(r3)
+ mfspr r0, SPRN_MMCR2
+ std r0, THREAD_MMCR2(r3)
+ mfspr r0, SPRN_MMCRA
+ std r0, THREAD_MMCRA(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+ DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
ld r0, THREAD_EBBRR(r4)
mtspr SPRN_EBBRR, r0
+ /* PMU registers made user read/(write) by EBB */
+ ld r0, THREAD_SIAR(r4)
+ mtspr SPRN_SIAR, r0
+ ld r0, THREAD_SDAR(r4)
+ mtspr SPRN_SDAR, r0
+ ld r0, THREAD_SIER(r4)
+ mtspr SPRN_SIER, r0
+ ld r0, THREAD_MMCR0(r4)
+ mtspr SPRN_MMCR0, r0
+ ld r0, THREAD_MMCR2(r4)
+ mtspr SPRN_MMCR2, r0
+ ld r0, THREAD_MMCRA(r4)
+ mtspr SPRN_MMCRA, r0
+
ld r0,THREAD_TAR(r4)
mtspr SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
- bl .schedule
+ SCHEDULE_USER
b .ret_from_except_lite
1: bl .save_nvgprs
*/
mfspr r14,SPRN_DBSR /* check single-step/branch taken */
- andis. r15,r14,DBSR_IC@h
+ andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
bge+ cr1,1f
/* here it looks like we got an inappropriate debug exception. */
- lis r14,DBSR_IC@h /* clear the IC event */
+ lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */
mtspr SPRN_DBSR,r14
mtspr SPRN_CSRR1,r11
*/
mfspr r14,SPRN_DBSR /* check single-step/branch taken */
- andis. r15,r14,DBSR_IC@h
+ andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
bge+ cr1,1f
/* here it looks like we got an inappropriate debug exception. */
- lis r14,DBSR_IC@h /* clear the IC event */
+ lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
mtspr SPRN_DBSR,r14
mtspr SPRN_DSRR1,r11
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
+#include <linux/hardirq.h>
#include <asm/page.h>
#include <asm/current.h>
pr_debug("kexec: Starting switchover sequence.\n");
/* switch to a staticly allocated stack. Based on irq stack code.
+ * We set up preempt_count to avoid using VMX in memcpy.
* XXX: the task struct will likely be invalid once we do the copy!
*/
kexec_stack.thread_info.task = current_thread_info()->task;
kexec_stack.thread_info.flags = 0;
+ kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+ kexec_stack.thread_info.cpu = current_thread_info()->cpu;
/* We need a static PACA, too; copy this CPU's PACA over and switch to
* it. Also poison per_cpu_offset to catch anyone using non-static
li r3,2
blr
+_GLOBAL(__bswapdi2)
+ rotlwi r9,r4,8
+ rotlwi r10,r3,8
+ rlwimi r9,r4,24,0,7
+ rlwimi r10,r3,24,0,7
+ rlwimi r9,r4,24,16,23
+ rlwimi r10,r3,24,16,23
+ mr r3,r9
+ mr r4,r10
+ blr
+
_GLOBAL(abs)
srawi r4,r3,31
xor r3,r3,r4
isync
blr
+_GLOBAL(__bswapdi2)
+ srdi r8,r3,32
+ rlwinm r7,r3,8,0xffffffff
+ rlwimi r7,r3,24,0,7
+ rlwinm r9,r8,8,0xffffffff
+ rlwimi r7,r3,24,16,23
+ rlwimi r9,r8,24,0,7
+ rlwimi r9,r8,24,16,23
+ sldi r7,r7,32
+ or r3,r7,r9
+ blr
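Both __bswapdi2 implementations above (the 32-bit two-register variant and the 64-bit one) provide the libgcc 64-bit byte-swap helper that the compiler may call for __builtin_bswap64. As an illustration only (the name bswapdi2_ref and this C form are not part of the patch), the intended semantics are:

unsigned long long bswapdi2_ref(unsigned long long x)
{
	/* Reverse the byte order of a 64-bit value. */
	return ((x & 0x00000000000000ffULL) << 56) |
	       ((x & 0x000000000000ff00ULL) << 40) |
	       ((x & 0x0000000000ff0000ULL) << 24) |
	       ((x & 0x00000000ff000000ULL) <<  8) |
	       ((x & 0x000000ff00000000ULL) >>  8) |
	       ((x & 0x0000ff0000000000ULL) >> 24) |
	       ((x & 0x00ff000000000000ULL) >> 40) |
	       ((x & 0xff00000000000000ULL) >> 56);
}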
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
enum pci_mmap_state mmap_state,
int write_combine)
{
- unsigned long prot = pgprot_val(protection);
/* Write combine is always 0 on non-memory space mappings. On
* memory space, if the user didn't pass 1, we check for a
/* XXX would be nice to have a way to ask for write-through */
if (write_combine)
- return pgprot_noncached_wc(prot);
+ return pgprot_noncached_wc(protection);
else
- return pgprot_noncached(prot);
+ return pgprot_noncached(protection);
}
/*
* ranges. However, some machines (thanks Apple !) tend to split their
* space into lots of small contiguous ranges. So we have to coalesce.
*
- * - We can only cope with all memory ranges having the same offset
- * between CPU addresses and PCI addresses. Unfortunately, some bridges
- * are setup for a large 1:1 mapping along with a small "window" which
- * maps PCI address 0 to some arbitrary high address of the CPU space in
- * order to give access to the ISA memory hole.
- * The way out of here that I've chosen for now is to always set the
- * offset based on the first resource found, then override it if we
- * have a different offset and the previous was set by an ISA hole.
- *
* - Some busses have IO space not starting at 0, which causes trouble with
* the way we do our IO resource renumbering. The code somewhat deals with
* it for 64 bits but I would expect problems on 32 bits.
int rlen;
int pna = of_n_addr_cells(dev);
int np = pna + 5;
- int memno = 0, isa_hole = -1;
+ int memno = 0;
u32 pci_space;
unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
- unsigned long long isa_mb = 0;
struct resource *res;
printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
}
/* Handles ISA memory hole space here */
if (pci_addr == 0) {
- isa_mb = cpu_addr;
- isa_hole = memno;
if (primary || isa_mem_base == 0)
isa_mem_base = cpu_addr;
hose->isa_mem_phys = cpu_addr;
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
if (!res->flags) {
- printk(KERN_ERR "PCI: Memory resource 0 not set for "
- "host bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ if (i == 0)
+ printk(KERN_ERR "PCI: Memory resource 0 not set for "
+ "host bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
continue;
}
offset = hose->mem_offset[i];
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
+
+static void quirk_radeon_32bit_msi(struct pci_dev *dev)
+{
+ struct pci_dn *pdn = pci_get_pdn(dev);
+
+ if (pdn)
+ pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
+{
+ struct device_node *dn = pci_device_to_OF_node(pdev);
+ if (!dn)
+ return NULL;
+ return PCI_DN(dn);
+}
+
/*
* Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
int __ucmpdi2(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ucmpdi2);
#endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
static void prime_debug_regs(struct thread_struct *thread)
{
+ /*
+ * We could have inherited MSR_DE from userspace, since
+ * it doesn't get cleared on exception entry. Make sure
+ * MSR_DE is clear before we enable any debug events.
+ */
+ mtmsr(mfmsr() & ~MSR_DE);
+
mtspr(SPRN_IAC1, thread->iac1);
mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
* do some house keeping and then return from the fork or clone
* system call, using the stack frame created above.
*/
+ ((unsigned long *)sp)[0] = 0;
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
+#include <linux/context_tracking.h>
#include <asm/uaccess.h>
#include <asm/page.h>
{
long ret = 0;
+ user_exit();
+
secure_computing_strict(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
+
+ user_enter();
}
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}
+enum rtas_cpu_state {
+ DOWN,
+ UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ if (!cpumask_empty(cpus)) {
+ cpumask_clear(cpus);
+ return -EINVAL;
+ } else
+ return 0;
+}
+#else
+/* On return, cpumask will be altered to indicate which CPUs changed state:
+ * CPUs whose state changed will be set in the mask,
+ * CPUs whose state is unchanged will be cleared in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ int cpu;
+ int cpuret = 0;
+ int ret = 0;
+
+ if (cpumask_empty(cpus))
+ return 0;
+
+ for_each_cpu(cpu, cpus) {
+ switch (state) {
+ case DOWN:
+ cpuret = cpu_down(cpu);
+ break;
+ case UP:
+ cpuret = cpu_up(cpu);
+ break;
+ }
+ if (cpuret) {
+ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ __func__,
+ ((state == UP) ? "up" : "down"),
+ cpu, cpuret);
+ if (!ret)
+ ret = cpuret;
+ if (state == UP) {
+ /* clear bits for unchanged cpus, return */
+ cpumask_shift_right(cpus, cpus, cpu);
+ cpumask_shift_left(cpus, cpus, cpu);
+ break;
+ } else {
+ /* clear bit for unchanged cpu, continue */
+ cpumask_clear_cpu(cpu, cpus);
+ }
+ }
+ }
+
+ return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+ int ret;
+
+ ret = rtas_cpu_state_change_mask(UP, cpus);
+
+ if (ret) {
+ cpumask_var_t tmp_mask;
+
+ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+ return ret;
+
+ /* Use tmp_mask to preserve cpus mask from first failure */
+ cpumask_copy(tmp_mask, cpus);
+ rtas_offline_cpus_mask(tmp_mask);
+ free_cpumask_var(tmp_mask);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+ return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
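A minimal usage sketch of the two helpers just added, mirroring how rtas_ibm_suspend_me uses them below (the wrapper name with_all_cpus_online and the fn callback are assumptions for illustration, not part of the patch):

static int with_all_cpus_online(void (*fn)(void))
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_TEMPORARY))
		return -ENOMEM;

	/* Bring every present-but-offline CPU up around the payload. */
	cpumask_andnot(mask, cpu_present_mask, cpu_online_mask);
	rc = rtas_online_cpus_mask(mask);
	if (!rc) {
		fn();				/* hypothetical payload */
		rtas_offline_cpus_mask(mask);	/* restore the prior state */
	}

	free_cpumask_var(mask);
	return rc;
}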
int rtas_ibm_suspend_me(struct rtas_args *args)
{
long state;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
struct rtas_suspend_me_data data;
DECLARE_COMPLETION_ONSTACK(done);
+ cpumask_var_t offline_mask;
+ int cpuret;
if (!rtas_service_present("ibm,suspend-me"))
return -ENOSYS;
return 0;
}
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
atomic_set(&data.working, 0);
atomic_set(&data.done, 0);
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ cpuret = rtas_online_cpus_mask(offline_mask);
+ if (cpuret) {
+ pr_err("%s: Could not bring present CPUs online.\n", __func__);
+ atomic_set(&data.error, cpuret);
+ goto out;
+ }
+
stop_topology_update();
/* Call function on all CPUs. One of us will make the
start_topology_update();
+ /* Take down CPUs not online prior to suspend */
+ cpuret = rtas_offline_cpus_mask(offline_mask);
+ if (cpuret)
+ pr_warn("%s: Could not restore CPUs to offline state.\n",
+ __func__);
+
+out:
+ free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
/* Array sizes */
#define VALIDATE_BUF_SIZE 4096
+#define VALIDATE_MSG_LEN 256
#define RTAS_MSG_MAXLEN 64
/* Quirk - RTAS requires 4k list length and block size */
}
static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
- char *msg)
+ char *msg, int msglen)
{
int n;
n = sprintf(msg, "%d\n", args_buf->update_results);
if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
(args_buf->update_results == VALIDATE_TMP_UPDATE))
- n += sprintf(msg + n, "%s\n", args_buf->buf);
+ n += snprintf(msg + n, msglen - n, "%s\n",
+ args_buf->buf);
} else {
n = sprintf(msg, "%d\n", args_buf->status);
}
{
struct rtas_validate_flash_t *const args_buf =
&rtas_validate_flash_data;
- char msg[RTAS_MSG_MAXLEN];
+ char msg[VALIDATE_MSG_LEN];
int msglen;
mutex_lock(&rtas_validate_flash_mutex);
- msglen = get_validate_flash_msg(args_buf, msg);
+ msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
mutex_unlock(&rtas_validate_flash_mutex);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
+#include <linux/context_tracking.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
+#include <asm/tm.h>
#include "signal.h"
* through debug.exception-trace sysctl.
*/
-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;
/*
* Allocate space for the signal frame
*/
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
/* Default to using normal stack */
- oldsp = get_clean_sp(regs, is_32);
+ oldsp = get_clean_sp(sp, is_32);
/* Check for alt stack */
if ((ka->sa.sa_flags & SA_ONSTACK) &&
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
+ user_exit();
+
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
+
+ user_enter();
+}
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+ /* When in an active transaction that takes a signal, we need to be
+ * careful with the stack. It's possible that the stack has moved back
+ * up after the tbegin. The obvious case here is when the tbegin is
+ * called inside a function that returns before a tend. In this case,
+ * the stack is part of the checkpointed transactional memory state.
+ * If we write over this non transactionally or in suspend, we are in
+ * trouble because if we get a tm abort, the program counter and stack
+ * pointer will be back at the tbegin but our in memory stack won't be
+ * valid anymore.
+ *
+ * To avoid this, when taking a signal in an active transaction, we
+ * need to use the stack pointer from the checkpointed state, rather
+ * than the speculated state. This ensures that the signal context
+ * (written tm suspended) will be written below the stack required for
+ * the rollback. The transaction is aborted because of the treclaim,
+ * so any memory written between the tbegin and the signal will be
+ * rolled back anyway.
+ *
+ * For signals taken in non-TM or suspended mode, we use the
+ * normal/non-checkpointed stack pointer.
+ */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ tm_enable();
+ tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
+ return current->thread.ckpt_regs.gpr[1];
+ }
+#endif
+ return regs->gpr[1];
}
extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32);
extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
{
unsigned long msr = regs->msr;
- /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
- * thread.transact_fpr[], thread.transact_vr[], etc.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
/* Set up Signal Frame */
/* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+ rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
addr = rt_sf;
if (unlikely(rt_sf == NULL))
goto badframe;
unsigned long tramp;
/* Set up Signal Frame */
- frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
if (unlikely(frame == NULL))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
* As above, but Transactional Memory is in use, so deliver sigcontexts
* containing checkpointed and transactional register states.
*
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state. If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
*/
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
- /* tm_reclaim rolls back all reg states, saving checkpointed (older)
- * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
- * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
- * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
- * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the
- * stack, in *regs.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
unsigned long newsp = 0;
long err = 0;
- frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
if (unlikely(frame == NULL))
goto badframe;
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
+#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
void machine_check_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
int recover = 0;
__get_cpu_var(irq_stat).mce_exceptions++;
recover = cur_cpu_spec->machine_check(regs);
if (recover > 0)
- return;
+ goto bail;
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort
* -- BenH
*/
bad_page_fault(regs, regs->dar, SIGBUS);
- return;
+ goto bail;
#endif
if (debugger_fault_handler(regs))
- return;
+ goto bail;
if (check_io_access(regs))
- return;
+ goto bail;
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable Machine check");
+
+bail:
+ exception_exit(prev_state);
}
void SMIException(struct pt_regs *regs)
void unknown_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, 0, 0);
+
+ exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto bail;
if (debugger_iabr_match(regs))
- return;
+ goto bail;
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+ exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
void __kprobes single_step_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
clear_single_step(regs);
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto bail;
if (debugger_sstep(regs))
- return;
+ goto bail;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+ exception_exit(prev_state);
}
/*
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+ /* If we're emulating a load/store in an active transaction, we cannot
+ * emulate it as the kernel operates in transaction suspended context.
+ * We need to abort the transaction. This creates a persistent TM
+ * abort so tell the user what caused it with a new code.
+ */
+ if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+ tm_enable();
+ tm_abort(cause);
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+ return false;
+}
+#endif
+
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+ if (tm_abort_check(regs,
+ TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+ return -EINVAL;
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
}
void __kprobes program_check_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
if (reason & REASON_FP) {
/* IEEE FP exception */
parse_fpe(regs);
- return;
+ goto bail;
}
if (reason & REASON_TRAP) {
/* Debugger is first in line to stop recursive faults in
* rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
- return;
+ goto bail;
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
- return;
+ goto bail;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
- return;
+ goto bail;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
- return;
+ goto bail;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
if (!user_mode(regs) &&
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
- return;
+ goto bail;
}
/* If usermode caused this, it's done something illegal and
* gets a SIGILL slap on the wrist. We call it an illegal
*/
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
- return;
+ goto bail;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
"at %lx (msr 0x%x)\n", regs->nip, reason);
switch (do_mathemu(regs)) {
case 0:
emulate_single_step(regs);
- return;
+ goto bail;
case 1: {
int code = 0;
code = __parse_fpscr(current->thread.fpscr.val);
_exception(SIGFPE, regs, code, regs->nip);
- return;
+ goto bail;
}
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- return;
+ goto bail;
}
/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */
case 0:
regs->nip += 4;
emulate_single_step(regs);
- return;
+ goto bail;
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- return;
+ goto bail;
}
}
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
else
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+ exception_exit(prev_state);
}
void alignment_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
+ if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+ goto bail;
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
emulate_single_step(regs);
- return;
+ goto bail;
}
/* Operand address was bad */
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, regs->dar, sig);
+
+bail:
+ exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+ exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return;
+ goto bail;
}
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+ exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
udbg_init_usbgecko();
#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+ /* In memory console */
+ udbg_init_memcons();
#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
udbg_init_ehv_bc();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
case H_CPPR:
case H_EOI:
case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
ret = kvmppc_xics_hcall(vcpu, req);
break;
case H_CPPR:
case H_EOI:
case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu))
return kvmppc_h_pr_xics_hcall(vcpu, cmd);
break;
return H_SUCCESS;
}
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ union kvmppc_icp_state state;
+ struct kvmppc_icp *icp;
+
+ icp = vcpu->arch.icp;
+ if (icp->server_num != server) {
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+ }
+ state = ACCESS_ONCE(icp->state);
+ kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+ kvmppc_set_gpr(vcpu, 5, state.mfrr);
+ return H_SUCCESS;
+}
+
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
union kvmppc_icp_state old_state, new_state;
if (!xics || !vcpu->arch.icp)
return H_HARDWARE;
+ /* These requests don't have real-mode implementations at present */
+ switch (req) {
+ case H_XIRR_X:
+ res = kvmppc_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 4, res);
+ kvmppc_set_gpr(vcpu, 5, get_tb());
+ return rc;
+ case H_IPOLL:
+ rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ return rc;
+ }
+
/* Check for real mode returning too hard */
if (xics->real_mode)
return kvmppc_xics_rm_complete(vcpu, req);
* aligned we don't need to clear the bottom 7 bits of either
* address.
*/
- ori r9,r3,1 /* stream=1 */
+ ori r9,r3,1 /* stream=1 => to */
#ifdef CONFIG_PPC_64K_PAGES
- lis r7,0x0E01 /* depth=7, units=512 */
+ lis r7,0x0E01 /* depth=7
+ * units/cachelines=512 */
#else
lis r7,0x0E00 /* depth=7 */
- ori r7,r7,0x1000 /* units=32 */
+ ori r7,r7,0x1000 /* units/cachelines=32 */
#endif
ori r10,r7,1 /* stream=1 */
.machine push
.machine "power4"
- dcbt r0,r4,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r4,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
#ifdef CONFIG_ALTIVEC
.machine push
.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r6,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
#include <asm/firmware.h>
#include <asm/page.h>
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
+ enum ctx_state prev_state = exception_enter();
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
int fault;
+ int rc = 0;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* look at it
*/
if (error_code & ICSWX_DSI_UCT) {
- int rc = acop_handle_fault(regs, address, error_code);
+ rc = acop_handle_fault(regs, address, error_code);
if (rc)
- return rc;
+ goto bail;
}
#endif /* CONFIG_PPC_ICSWX */
if (notify_page_fault(regs))
- return 0;
+ goto bail;
if (unlikely(debugger_fault_handler(regs)))
- return 0;
+ goto bail;
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (!user_mode(regs) && (address >= TASK_SIZE))
- return SIGSEGV;
+ if (!user_mode(regs) && (address >= TASK_SIZE)) {
+ rc = SIGSEGV;
+ goto bail;
+ }
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* breakpoint match */
do_break(regs, address, error_code);
- return 0;
+ goto bail;
}
#endif
local_irq_enable();
if (in_atomic() || mm == NULL) {
- if (!user_mode(regs))
- return SIGSEGV;
+ if (!user_mode(regs)) {
+ rc = SIGSEGV;
+ goto bail;
+ }
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
- int rc = mm_fault_error(regs, address, fault);
+ rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
- return rc;
+ goto bail;
+ else
+ rc = 0;
}
/*
}
up_read(&mm->mmap_sem);
- return 0;
+ goto bail;
bad_area:
up_read(&mm->mmap_sem);
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
- return 0;
+ goto bail;
}
if (is_exec && (error_code & DSISR_PROTFAULT))
" page (%lx) - exploit attempt? (uid: %d)\n",
address, from_kuid(&init_user_ns, current_uid()));
- return SIGSEGV;
+ rc = SIGSEGV;
+
+bail:
+ exception_exit(prev_state);
+ return rc;
}
hpte_v = hptep->v;
actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We need to invalidate the TLB always because hpte_remove doesn't do
+ * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+ * random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
if (actual_psize < 0) {
- native_unlock_hpte(hptep);
- return -1;
+ actual_psize = psize;
+ ret = -1;
+ goto err_out;
}
- /* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v)) {
DBG_LOW(" -> miss\n");
ret = -1;
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}
+err_out:
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
hptep = htab_address + slot;
actual_psize = hpte_actual_psize(hptep, psize);
if (actual_psize < 0)
- return;
+ actual_psize = psize;
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
hpte_v = hptep->v;
actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We need to invalidate the TLB always because hpte_remove doesn't do
+ * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+ * random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
if (actual_psize < 0) {
+ actual_psize = psize;
native_unlock_hpte(hptep);
- local_irq_restore(flags);
- return;
+ goto err_out;
}
- /* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v))
native_unlock_hpte(hptep);
else
/* Invalidate the hpte. NOTE: this also unlocks it */
hptep->v = 0;
+err_out:
/* Invalidate the TLB */
tlbie(vpn, psize, actual_psize, ssize, local);
-
local_irq_restore(flags);
}
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
+#include <linux/context_tracking.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
*/
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
+ enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
unsigned long vsid;
struct mm_struct *mm;
mm = current->mm;
if (! mm) {
DBG_LOW(" user region with no mm !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
/* Not a valid range
* Send the problem up to do_page_fault
*/
- return 1;
+ rc = 1;
+ goto bail;
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
/* Bad address. */
if (!vsid) {
DBG_LOW("Bad address!\n");
- return 1;
+ rc = 1;
+ goto bail;
}
/* Get pgdir */
pgdir = mm->pgd;
- if (pgdir == NULL)
- return 1;
+ if (pgdir == NULL) {
+ rc = 1;
+ goto bail;
+ }
/* Check CPU locality */
tmp = cpumask_of(smp_processor_id());
ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
/* Add _PAGE_PRESENT to the required access perm */
*/
if (access & ~pte_val(*ptep)) {
DBG_LOW(" no access !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
#ifdef CONFIG_HUGETLB_PAGE
- if (hugeshift)
- return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+ if (hugeshift) {
+ rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
ssize, hugeshift, psize);
+ goto bail;
+ }
#endif /* CONFIG_HUGETLB_PAGE */
#ifndef CONFIG_PPC_64K_PAGES
pte_val(*(ptep + PTRS_PER_PTE)));
#endif
DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+ exception_exit(prev_state);
return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
*/
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
+ enum ctx_state prev_state = exception_enter();
+
if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
if (rc == -2)
_exception(SIGBUS, regs, BUS_ADRERR, address);
} else
bad_page_fault(regs, address, SIGBUS);
+
+ exception_exit(prev_state);
}
long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long phys)
{
int mapped = htab_bolt_mapping(start, start + page_size, phys,
- PAGE_KERNEL, mmu_vmemmap_psize,
+ pgprot_val(PAGE_KERNEL),
+ mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
+#include <asm/code-patching.h>
#define BHRB_MAX_ENTRIES 32
#define BHRB_TARGET 0x0000000000000002
return 1;
}
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
- return !!(regs->result & 1);
+ return !!regs->result;
}
/*
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
- unsigned long sdsync;
+ bool sdar_valid;
- if (ppmu->flags & PPMU_SIAR_VALID)
- sdsync = POWER7P_MMCRA_SDAR_VALID;
- else if (ppmu->flags & PPMU_ALT_SIPR)
- sdsync = POWER6_MMCRA_SDSYNC;
- else
- sdsync = MMCRA_SDSYNC;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ sdar_valid = regs->dar & SIER_SDAR_VALID;
+ else {
+ unsigned long sdsync;
+
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ sdsync = POWER7P_MMCRA_SDAR_VALID;
+ else if (ppmu->flags & PPMU_ALT_SIPR)
+ sdsync = POWER6_MMCRA_SDSYNC;
+ else
+ sdsync = MMCRA_SDSYNC;
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ sdar_valid = mmcra & sdsync;
+ }
+
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
}
return !!(regs->dsisr & sipr);
}
-static bool regs_no_sipr(struct pt_regs *regs)
-{
- return !!(regs->result & 2);
-}
-
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
if (regs->msr & MSR_PR)
* SIAR which should give slightly more reliable
* results
*/
- if (regs_no_sipr(regs)) {
+ if (ppmu->flags & PPMU_NO_SIPR) {
unsigned long siar = mfspr(SPRN_SIAR);
if (siar >= PAGE_OFFSET)
return PERF_RECORD_MISC_KERNEL;
int use_siar;
regs->dsisr = mmcra;
- regs->result = 0;
-
- if (ppmu->flags & PPMU_NO_SIPR)
- regs->result |= 2;
-
- /*
- * On power8 if we're in random sampling mode, the SIER is updated.
- * If we're in continuous sampling mode, we don't have SIPR.
- */
- if (ppmu->flags & PPMU_HAS_SIER) {
- if (marked)
- regs->dar = mfspr(SPRN_SIER);
- else
- regs->result |= 2;
- }
+ if (ppmu->flags & PPMU_HAS_SIER)
+ regs->dar = mfspr(SPRN_SIER);
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
- else if (!regs_no_sipr(regs) && regs_sipr(regs))
+ else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
use_siar = 0;
else
use_siar = 1;
- regs->result |= use_siar;
+ regs->result = use_siar;
}
/*
unsigned long mmcra = regs->dsisr;
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
- if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
- return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ if (marked) {
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return regs->dar & SIER_SIAR_VALID;
+
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ }
return 1;
}
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+ asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ /* Clear BHRB if we changed task context to avoid data leaks */
+ if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+ power_pmu_bhrb_reset();
+ cpuhw->bhrb_context = event->ctx;
+ }
+ cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ cpuhw->bhrb_users--;
+ WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+ if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+ /* BHRB cannot be turned off when other
+ * events are active on the PMU.
+ */
+
+ /* avoid stale pointer */
+ cpuhw->bhrb_context = NULL;
+ }
+}
+
+/* Called from ctxsw to prevent one process's branch entries from
+ * mingling with another process's entries during context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+ if (ppmu->bhrb_nr)
+ power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+ unsigned int instr;
+ int ret;
+ __u64 target;
+
+ if (is_kernel_addr(addr))
+ return branch_target((unsigned int *)addr);
+
+ /* Userspace: need copy instruction here then translate it */
+ pagefault_disable();
+ ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+ if (ret) {
+ pagefault_enable();
+ return 0;
+ }
+ pagefault_enable();
+
+ target = branch_target(&instr);
+ if ((!target) || (instr & BRANCH_ABSOLUTE))
+ return target;
+
+ /* Translate relative branch target from kernel to user address */
+ return target - (unsigned long)&instr + addr;
+}
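The rebasing arithmetic at the end of power_pmu_bhrb_to may be easier to follow with a made-up example (all addresses below are illustrative, not from the patch):

/* A user branch at addr = 0x10001f00 is copied into the local variable
 * instr, which happens to live at &instr = 0xc0000000deadbe50.
 * branch_target(&instr) resolves the relative displacement against &instr,
 * so for a "b +0x40" it returns 0xc0000000deadbe90.  Subtracting &instr
 * and adding addr rebases that to 0x10001f40, the target in the user's
 * address space. */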
+
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+ u64 val;
+ u64 addr;
+ int r_index, u_index, pred;
+
+ r_index = 0;
+ u_index = 0;
+ while (r_index < ppmu->bhrb_nr) {
+ /* Assembly read function */
+ val = read_bhrb(r_index++);
+ if (!val)
+ /* Terminal marker: End of valid BHRB entries */
+ break;
+ else {
+ addr = val & BHRB_EA;
+ pred = val & BHRB_PREDICTION;
+
+ if (!addr)
+ /* invalid entry */
+ continue;
+
+ /* Branches are read most recent first (ie. mfbhrb 0 is
+ * the most recent branch).
+ * There are two types of valid entries:
+ * 1) a target entry which is the to address of a
+ * computed goto like a blr, bctr or btar. The next
+ * entry read from the bhrb will be branch
+ * corresponding to this target (ie. the actual
+ * blr/bctr/btar instruction).
+ * 2) a from address which is an actual branch. If a
+ * target entry precedes this, then this is the
+ * matching branch for that target. If this is not
+ * following a target entry, then this is a branch
+ * where the target is given as an immediate field
+ * in the instruction (ie. an i or b form branch).
+ * In this case we need to read the instruction from
+ * memory to determine the target/to address.
+ */
+
+ if (val & BHRB_TARGET) {
+ /* Target branches use two entries
+ * (ie. computed gotos/XL form)
+ */
+ cpuhw->bhrb_entries[u_index].to = addr;
+ cpuhw->bhrb_entries[u_index].mispred = pred;
+ cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+ /* Get from address in next entry */
+ val = read_bhrb(r_index++);
+ addr = val & BHRB_EA;
+ if (val & BHRB_TARGET) {
+ /* Shouldn't have two targets in a
+ row. Reset index and try again */
+ r_index--;
+ addr = 0;
+ }
+ cpuhw->bhrb_entries[u_index].from = addr;
+ } else {
+ /* Branches to immediate field
+ (ie I or B form) */
+ cpuhw->bhrb_entries[u_index].from = addr;
+ cpuhw->bhrb_entries[u_index].to =
+ power_pmu_bhrb_to(addr);
+ cpuhw->bhrb_entries[u_index].mispred = pred;
+ cpuhw->bhrb_entries[u_index].predicted = ~pred;
+ }
+ u_index++;
+
+ }
+ }
+ cpuhw->bhrb_stack.nr = u_index;
+ return;
+}
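A made-up walk-through of the read loop above (the values are illustrative only, not from the patch):

/* Suppose read_bhrb() returns, most recent branch first:
 *   [0] 0x10002a0c | BHRB_TARGET  -> target entry:  to   = 0x10002a0c
 *   [1] 0x10001f08                -> its from pair: from = 0x10001f08
 *   [2] 0x10003004                -> plain branch:  from = 0x10003004,
 *                                    to = power_pmu_bhrb_to(0x10003004)
 *   [3] 0                         -> terminal marker, loop ends
 * This fills bhrb_entries[0] = {0x10001f08 -> 0x10002a0c} and
 * bhrb_entries[1] = {0x10003004 -> decoded target}, so bhrb_stack.nr = 2. */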
+
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
return n;
}
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
- asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
- if (!ppmu->bhrb_nr)
- return;
-
- /* Clear BHRB if we changed task context to avoid data leaks */
- if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
- power_pmu_bhrb_reset();
- cpuhw->bhrb_context = event->ctx;
- }
- cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
- if (!ppmu->bhrb_nr)
- return;
-
- cpuhw->bhrb_users--;
- WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
- if (!cpuhw->disabled && !cpuhw->bhrb_users) {
- /* BHRB cannot be turned off when other
- * events are active on the PMU.
- */
-
- /* avoid stale pointer */
- cpuhw->bhrb_context = NULL;
- }
-}
-
/*
* Add a event to the PMU.
* If all events are not already frozen, then we disable and
return 0;
}
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
- if (ppmu->bhrb_nr)
- power_pmu_bhrb_reset();
-}
-
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
.flush_branch_stack = power_pmu_flush_branch_stack,
};
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
- u64 val;
- u64 addr;
- int r_index, u_index, target, pred;
-
- r_index = 0;
- u_index = 0;
- while (r_index < ppmu->bhrb_nr) {
- /* Assembly read function */
- val = read_bhrb(r_index);
-
- /* Terminal marker: End of valid BHRB entries */
- if (val == 0) {
- break;
- } else {
- /* BHRB field break up */
- addr = val & BHRB_EA;
- pred = val & BHRB_PREDICTION;
- target = val & BHRB_TARGET;
-
- /* Probable Missed entry: Not applicable for POWER8 */
- if ((addr == 0) && (target == 0) && (pred == 1)) {
- r_index++;
- continue;
- }
-
- /* Real Missed entry: Power8 based missed entry */
- if ((addr == 0) && (target == 1) && (pred == 1)) {
- r_index++;
- continue;
- }
-
- /* Reserved condition: Not a valid entry */
- if ((addr == 0) && (target == 1) && (pred == 0)) {
- r_index++;
- continue;
- }
-
- /* Is a target address */
- if (val & BHRB_TARGET) {
- /* First address cannot be a target address */
- if (r_index == 0) {
- r_index++;
- continue;
- }
-
- /* Update target address for the previous entry */
- cpuhw->bhrb_entries[u_index - 1].to = addr;
- cpuhw->bhrb_entries[u_index - 1].mispred = pred;
- cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
- /* Dont increment u_index */
- r_index++;
- } else {
- /* Update address, flags for current entry */
- cpuhw->bhrb_entries[u_index].from = addr;
- cpuhw->bhrb_entries[u_index].mispred = pred;
- cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
- /* Successfully popullated one entry */
- u_index++;
- r_index++;
- }
- }
- }
- cpuhw->bhrb_stack.nr = u_index;
- return;
-}
-
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
config RTAS_PROC
bool "Proc interface to RTAS"
- depends on PPC_RTAS
+ depends on PPC_RTAS && PROC_FS
default y
config RTAS_FLASH
select PPC_ICP_NATIVE
select PPC_P7_NAP
select PPC_PCI_CHOICE if EMBEDDED
+ select EPAPR_BOOT
default y
config POWERNV_MSI
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/firmware.h>
static struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
+static unsigned int *opal_irqs;
+static unsigned int opal_irq_count;
int __init early_init_dt_scan_opal(unsigned long node,
const char *uname, int depth, void *data)
opal.entry, entryp, entrysz);
powerpc_firmware_features |= FW_FEATURE_OPAL;
- if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+ if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+ powerpc_firmware_features |= FW_FEATURE_OPALv2;
+ powerpc_firmware_features |= FW_FEATURE_OPALv3;
+ printk("OPAL V3 detected !\n");
+ } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
powerpc_firmware_features |= FW_FEATURE_OPALv2;
printk("OPAL V2 detected !\n");
} else {
rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
len = total_len;
rc = opal_console_write(vtermno, &len, data);
+
+ /* Closed or other error: drop */
+ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
+ rc != OPAL_BUSY_EVENT) {
+ written = total_len;
+ break;
+ }
if (rc == OPAL_SUCCESS) {
total_len -= len;
data += len;
irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
pr_debug("opal: Found %d interrupts reserved for OPAL\n",
irqs ? (irqlen / 4) : 0);
+ opal_irq_count = irqlen / 4;
+ opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
unsigned int hwirq = be32_to_cpup(irqs);
unsigned int irq = irq_create_mapping(NULL, hwirq);
if (rc)
pr_warning("opal: Error %d requesting irq %d"
" (0x%x)\n", rc, irq, hwirq);
+ opal_irqs[i] = irq;
}
return 0;
}
subsys_initcall(opal_init);
+
+void opal_shutdown(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < opal_irq_count; i++) {
+ if (opal_irqs[i])
+ free_irq(opal_irqs[i], 0);
+ opal_irqs[i] = 0;
+ }
+}
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
-static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
-{
- struct device_node *np;
-
- np = pci_device_to_OF_node(dev);
- if (!np)
- return NULL;
- return PCI_DN(np);
-}
-
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
unsigned long pe;
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
if (!pdn)
return NULL;
/* Add to all parents PELT-V */
while (parent) {
- struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+ struct pci_dn *pdn = pci_get_pdn(parent);
if (pdn && pdn->pe_number != IODA_INVALID_PE) {
rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
struct pnv_ioda_pe *pe;
int pe_num;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
if (pdn == NULL) {
pr_warn("%s: No device node associated with device !\n",
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
- struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
+ struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
/*
unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
struct irq_data *idata;
struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
if (pe->mve_number < 0)
return -ENXIO;
+ /* Force 32-bit MSI on some broken devices */
+ if (pdn && pdn->force_32bit_msi)
+ is_64 = 0;
+
/* Assign XIVE to PE */
rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
if (rc) {
if (!phb->initialized)
return 0;
- pdn = pnv_ioda_get_pdn(dev);
+ pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return -EINVAL;
return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
+static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+{
+ opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+ OPAL_ASSERT_RESET);
+}
+
void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
struct pci_controller *hose;
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ /* Setup shutdown function for kexec */
+ phb->shutdown = pnv_pci_ioda_shutdown;
+
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+
+ if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+ return -ENODEV;
return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
}
while (npages--)
*(tcep++) = 0;
- if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
}
pnv_pci_dma_fallback_setup(hose, pdev);
}
+void pnv_pci_shutdown(void)
+{
+ struct pci_controller *hose;
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->shutdown)
+ phb->shutdown(phb);
+ }
+}
+
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+ void (*shutdown)(struct pnv_phb *phb);
union {
struct {
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
u64 *startp, u64 *endp);
+
#endif /* __POWERNV_PCI_H */
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
+extern void pnv_pci_shutdown(void);
#else
static inline void pnv_pci_init(void) { }
+static inline void pnv_pci_shutdown(void) { }
#endif
#endif /* _POWERNV_H */
if (root)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: PowerNV %s\n", model);
- if (firmware_has_feature(FW_FEATURE_OPALv2))
+ if (firmware_has_feature(FW_FEATURE_OPALv3))
+ seq_printf(m, "firmware\t: OPAL v3\n");
+ else if (firmware_has_feature(FW_FEATURE_OPALv2))
seq_printf(m, "firmware\t: OPAL v2\n");
else if (firmware_has_feature(FW_FEATURE_OPAL))
seq_printf(m, "firmware\t: OPAL v1\n");
{
}
+static void pnv_shutdown(void)
+{
+ /* Let the PCI code clear up IODA tables */
+ pnv_pci_shutdown();
+
+ /* And unregister all OPAL interrupts so they don't fire
+ * up while we kexec
+ */
+ opal_shutdown();
+}
+
#ifdef CONFIG_KEXEC
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.progress = pnv_progress,
+ .machine_shutdown = pnv_shutdown,
.power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
#ifdef CONFIG_KEXEC
BUG_ON(nr < 0 || nr >= NR_CPUS);
- /* On OPAL v2 the CPU are still spinning inside OPAL itself,
- * get them back now
+ /*
+ * If we already started or OPALv2 is not supported, we just
+ * kick the CPU via the PACA
*/
- if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
- pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
- rc = opal_start_cpu(pcpu, start_here);
+ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+ goto kick;
+
+ /*
+ * At this point, the CPU can either be spinning on the way in
+ * from kexec or be inside OPAL waiting to be started for the
+ * first time. OPAL v3 allows us to query OPAL to know if it
+ * has the CPUs, so we do that
+ */
+ if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ uint8_t status;
+
+ rc = opal_query_cpu_status(pcpu, &status);
if (rc != OPAL_SUCCESS) {
- pr_warn("OPAL Error %ld starting CPU %d\n",
+ pr_warn("OPAL Error %ld querying CPU %d state\n",
rc, nr);
return -ENODEV;
}
+
+ /*
+ * Already started, just kick it, probably coming from
+ * kexec and spinning
+ */
+ if (status == OPAL_THREAD_STARTED)
+ goto kick;
+
+ /*
+ * Available/inactive, let's kick it
+ */
+ if (status == OPAL_THREAD_INACTIVE) {
+ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+ nr, pcpu);
+ rc = opal_start_cpu(pcpu, start_here);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("OPAL Error %ld starting CPU %d\n",
+ rc, nr);
+ return -ENODEV;
+ }
+ } else {
+ /*
+ * An unavailable CPU (or any other unknown status)
+ * shouldn't be started. It should also
+ * not be in the possible map but currently it can
+ * happen
+ */
+ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+ " (status %d)...\n", nr, pcpu, status);
+ return -ENODEV;
+ }
+ } else {
+ /*
+ * On OPAL v2, we just kick it and hope for the best,
+ * we must not test the error from opal_start_cpu() or
+ * we would fail to get CPUs from kexec.
+ */
+ opal_start_cpu(pcpu, start_here);
}
+ kick:
return smp_generic_kick_cpu(nr);
}
select PPC_PCI_CHOICE if EXPERT
select ZLIB_DEFLATE
select PPC_DOORBELL
+ select HAVE_CONTEXT_TRACKING
+ select HOTPLUG if SMP
+ select HOTPLUG_CPU if SMP
default y
config PPC_SPLPAR
#define RTAS_CHANGE_MSIX_FN 4
#define RTAS_CHANGE_32MSI_FN 5
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
- struct device_node *dn;
- struct pci_dn *pdn;
-
- dn = pci_device_to_OF_node(pdev);
- if (!dn) {
- dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
- return NULL;
- }
-
- pdn = PCI_DN(dn);
- if (!pdn) {
- dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
- return NULL;
- }
-
- return pdn;
-}
-
/* RTAS Helpers */
static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
{
struct pci_dn *pdn;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return;
struct pci_dn *pdn;
const u32 *req_msi;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
return 0;
}
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+ u32 addr_hi, addr_lo;
+
+ /*
+ * We should only get in here for IODA1 configs. This is based on the
+ * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI RTAS
+ * support, and we are in a PCIe Gen2 slot.
+ */
+ dev_info(&pdev->dev,
+ "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+ pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+ addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
struct msi_desc *entry;
struct msi_msg msg;
int nvec = nvec_in;
+ int use_32bit_msi_hack = 0;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
*/
again:
if (type == PCI_CAP_ID_MSI) {
- if (pdn->force_32bit_msi)
+ if (pdn->force_32bit_msi) {
rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
- else
+ if (rc < 0) {
+ /*
+ * We only want to run the 32 bit MSI hack below if
+ * the max bus speed is Gen2 speed
+ */
+ if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+ return rc;
+
+ use_32bit_msi_hack = 1;
+ }
+ } else
+ rc = -1;
+
+ if (rc < 0)
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
- if (rc < 0 && !pdn->force_32bit_msi) {
+ if (rc < 0) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
+
+ if (use_32bit_msi_hack && rc > 0)
+ rtas_hack_32bit_msi_gen2(pdev);
} else
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
}
arch_initcall(rtas_msi_init);
-static void quirk_radeon(struct pci_dev *dev)
-{
- struct pci_dn *pdn = get_pdn(dev);
-
- if (pdn)
- pdn->force_32bit_msi = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
struct device_attribute *attr,
const char *buf, size_t count)
{
+ cpumask_var_t offline_mask;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
stream_id = simple_strtoul(buf, NULL, 16);
do {
} while (rc == -EAGAIN);
if (!rc) {
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask,
+ cpu_online_mask);
+ rc = rtas_online_cpus_mask(offline_mask);
+ if (rc) {
+ pr_err("%s: Could not bring present CPUs online.\n",
+ __func__);
+ goto out;
+ }
+
stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
start_topology_update();
+
+ /* Take down CPUs not online prior to suspend */
+ if (rtas_offline_cpus_mask(offline_mask))
+ pr_warn("%s: Could not restore CPUs to offline "
+ "state.\n", __func__);
}
stream_id = 0;
if (!rc)
rc = count;
+out:
+ free_cpumask_var(offline_mask);
return rc;
}
xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
wsp_ics_set_xive(ics, hw_irq, xive);
- return 0;
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip wsp_irq_chip = {
obj-$(CONFIG_PPC_SCOM) += scom.o
+obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
+
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PPC_XICS) += xics/
ev_int_set_config(src, config, prio, cpuid);
spin_unlock_irqrestore(&ehv_pic_lock, flags);
- return 0;
+ return IRQ_SET_MASK_OK;
}
static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
#ifdef CONFIG_PPC32 /* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs (1)
#else
#define distribute_irqs (0)
#endif
mpic_physmask(mask));
}
- return 0;
+ return IRQ_SET_MASK_OK;
}
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
* it differently, then we should make sure we also change the default
* values of irq_desc[].affinity in irq.c.
*/
- if (distribute_irqs) {
+ if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
--- /dev/null
+/*
+ * A udbg backend which logs messages and reads input from in memory
+ * buffers.
+ *
+ * The console output can be read from memcons_output which is a
+ * circular buffer whose next write position is stored in memcons.output_pos.
+ *
+ * Input may be passed by writing into the memcons_input buffer when it is
+ * empty. The input buffer is empty when both input_pos == input_start and
+ * *input_start == '\0'.
+ *
+ * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
+ * Copyright (C) 2013 Alistair Popple, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/barrier.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/udbg.h>
+
+struct memcons {
+ char *output_start;
+ char *output_pos;
+ char *output_end;
+ char *input_start;
+ char *input_pos;
+ char *input_end;
+};
+
+static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
+static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
+
+struct memcons memcons = {
+ .output_start = memcons_output,
+ .output_pos = memcons_output,
+ .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
+ .input_start = memcons_input,
+ .input_pos = memcons_input,
+ .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
+};
+
+void memcons_putc(char c)
+{
+ char *new_output_pos;
+
+ *memcons.output_pos = c;
+ wmb();
+ new_output_pos = memcons.output_pos + 1;
+ if (new_output_pos >= memcons.output_end)
+ new_output_pos = memcons.output_start;
+
+ memcons.output_pos = new_output_pos;
+}
+
+int memcons_getc_poll(void)
+{
+ char c;
+ char *new_input_pos;
+
+ if (*memcons.input_pos) {
+ c = *memcons.input_pos;
+
+ new_input_pos = memcons.input_pos + 1;
+ if (new_input_pos >= memcons.input_end)
+ new_input_pos = memcons.input_start;
+ else if (*new_input_pos == '\0')
+ new_input_pos = memcons.input_start;
+
+ *memcons.input_pos = '\0';
+ wmb();
+ memcons.input_pos = new_input_pos;
+ return c;
+ }
+
+ return -1;
+}
+
+int memcons_getc(void)
+{
+ int c;
+
+ while (1) {
+ c = memcons_getc_poll();
+ if (c == -1)
+ cpu_relax();
+ else
+ break;
+ }
+
+ return c;
+}
+
+void udbg_init_memcons(void)
+{
+ udbg_putc = memcons_putc;
+ udbg_getc = memcons_getc;
+ udbg_getc_poll = memcons_getc_poll;
+}
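
Because memcons.output_pos is the next write position of a circular buffer, a
post-mortem reader that finds the structure in a memory dump can print the log
in order by starting at output_pos and wrapping around. A small userspace
sketch of that read-out, using a made-up 8-byte snapshot:

	#include <stdio.h>

	int main(void)
	{
		/* Made-up snapshot of the output ring after it has wrapped. */
		char ring[8] = { 'I', 'J', 'C', 'D', 'E', 'F', 'G', 'H' };
		int output_pos = 2;	/* next write index recorded in memcons */
		int i;

		/* Oldest data starts at output_pos once the buffer has wrapped. */
		for (i = output_pos; i < 8; i++)
			putchar(ring[i]);
		for (i = 0; i < output_pos; i++)
			putchar(ring[i]);
		putchar('\n');		/* prints "CDEFGHIJ" */
		return 0;
	}
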
__func__, d->irq, hw_irq, server, rc);
return -1;
}
- return 0;
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip ics_opal_irq_chip = {
select CLONE_BACKWARDS2
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES if !SMP
- select GENERIC_KERNEL_THREAD
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
return 0;
}
if (!write) {
- len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
+ strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
+ ARRAY_SIZE(buf));
+ len = strnlen(buf, ARRAY_SIZE(buf));
if (len > *lenp)
len = *lenp;
if (copy_to_user(buffer, buf, len))
return 0;
}
if (!write) {
- len = sprintf(buf, ops->active ? "1\n" : "0\n");
+ strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
+ len = strnlen(buf, ARRAY_SIZE(buf));
if (len > *lenp)
len = *lenp;
if (copy_to_user(buffer, buf, len)) {
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
}
#endif /* _ASM_S390_DMA_MAPPING_H */
#define MCOUNT_ADDR ((long)_mcount)
-#ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE 12
-#else
-#define MCOUNT_INSN_SIZE 20
-#endif
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
}
#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_INSN_SIZE 12
+#else
+#define MCOUNT_INSN_SIZE 22
+#endif
+
#endif /* _ASM_S390_FTRACE_H */
}
void *xlate_dev_mem_ptr(unsigned long phys);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
/*
void storage_key_init_range(unsigned long start, unsigned long end);
-static unsigned long pfmf(unsigned long function, unsigned long address)
+static inline unsigned long pfmf(unsigned long function, unsigned long address)
{
asm volatile(
" .insn rre,0xb9af0000,%[function],%[address]"
static inline void clear_page(void *page)
{
- if (MACHINE_HAS_PFMF) {
- pfmf(0x10000, (unsigned long)page);
- } else {
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
- }
+ register unsigned long reg1 asm ("1") = 0;
+ register void *reg2 asm ("2") = page;
+ register unsigned long reg3 asm ("3") = 4096;
+ asm volatile(
+ " mvcl 2,0"
+ : "+d" (reg2), "+d" (reg3) : "d" (reg1)
+ : "memory", "cc");
}
static inline void copy_page(void *to, void *from)
#define RCP_HC_BIT 0x00200000UL
#define RCP_GR_BIT 0x00040000UL
#define RCP_GC_BIT 0x00020000UL
-#define RCP_IN_BIT 0x00008000UL /* IPTE notify bit */
+#define RCP_IN_BIT 0x00002000UL /* IPTE notify bit */
/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT 0x00008000UL
#define RCP_HC_BIT 0x0020000000000000UL
#define RCP_GR_BIT 0x0004000000000000UL
#define RCP_GC_BIT 0x0002000000000000UL
-#define RCP_IN_BIT 0x0000800000000000UL /* IPTE notify bit */
+#define RCP_IN_BIT 0x0000200000000000UL /* IPTE notify bit */
/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT 0x0000800000000000UL
unsigned long address, bits;
unsigned char skey;
- if (!pte_present(*ptep))
+ if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
address = pte_val(*ptep) & PAGE_MASK;
skey = page_get_storage_key(address);
#ifdef CONFIG_PGSTE
int young;
- if (!pte_present(*ptep))
+ if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
/* Get referenced bit from storage key */
young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
unsigned long address;
unsigned long okey, nkey;
- if (!pte_present(entry))
+ if (pte_val(entry) & _PAGE_INVALID)
return;
address = pte_val(entry) & PAGE_MASK;
okey = nkey = page_get_storage_key(address);
pte = *ptep;
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
+
+ if (mm_has_pgste(mm))
+ pgste = pgste_update_all(&pte, pgste);
return pte;
}
unsigned long address,
pte_t *ptep, pte_t pte)
{
+ pgste_t pgste;
+
if (mm_has_pgste(mm)) {
+ pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+ pgste_set_key(ptep, pgste, pte);
pgste_set_pte(ptep, pte);
- pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+ pgste_set_unlock(ptep, pgste);
} else
*ptep = pte;
}
while (len) {
ptr = buffer;
opsize = insn_length(*code);
+ if (opsize > len)
+ break;
ptr += sprintf(ptr, "%p: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
-#ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 12
-#else
-#define MCOUNT_OFFSET_RET 22
-#endif
-
#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_disable_code(void);
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
+ ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
- trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
+ trace.func = ip;
/* Only trace if the calling function expects to. */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
.section .kprobes.text, "ax"
la %r2,0(%r14)
st %r0,__SF_BACKCHAIN(%r15)
la %r3,0(%r3)
+ ahi %r2,-MCOUNT_INSN_SIZE
l %r14,0b-0b(%r1)
l %r14,0(%r14)
basr %r14,%r14
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
.section .kprobes.text, "ax"
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
+ aghi %r2,-MCOUNT_INSN_SIZE
larl %r14,ftrace_trace_function
lg %r14,0(%r14)
basr %r14,%r14
* This is the main routine where commands issued by other
* cpus are handled.
*/
-static void do_ext_call_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
+static void smp_handle_ext_call(void)
{
unsigned long bits;
- int cpu;
-
- cpu = smp_processor_id();
- if (ext_code.code == 0x1202)
- inc_irq_stat(IRQEXT_EXC);
- else
- inc_irq_stat(IRQEXT_EMS);
- /*
- * handle bit signal external calls
- */
- bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
+ /* handle bit signal external calls */
+ bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
if (test_bit(ec_stop_cpu, &bits))
smp_stop_cpu();
-
if (test_bit(ec_schedule, &bits))
scheduler_ipi();
-
if (test_bit(ec_call_function, &bits))
generic_smp_call_function_interrupt();
-
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
+}
+static void do_ext_call_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
+ smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
continue;
pcpu = pcpu_devices + cpu;
pcpu->address = info->cpu[i].address;
- pcpu->state = (cpu >= info->configured) ?
+ pcpu->state = (i >= info->configured) ?
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
set_cpu_present(cpu, true);
{
unsigned long cregs[16];
+ /* Handle possible pending IPIs */
+ smp_handle_ext_call();
set_cpu_online(smp_processor_id(), false);
/* Disable pseudo page faults on this cpu. */
pfault_fini();
mp = (struct gmap_pgtable *) page->index;
rmap->gmap = gmap;
rmap->entry = segment_ptr;
- rmap->vmaddr = address;
+ rmap->vmaddr = address & PMD_MASK;
spin_lock(&mm->page_table_lock);
if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
break;
}
/* Get the page mapped */
- if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
- NULL, NULL) != 1) {
+ if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
rc = -EFAULT;
break;
}
unsigned long empty_zero_page;
EXPORT_SYMBOL_GPL(empty_zero_page);
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
static void setup_zero_page(void)
{
struct page *page;
select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
select GENERIC_TIME_VSYSCALL if X86_64
select KTIME_SCALAR if X86_32
- select ALWAYS_USE_PERSISTENT_CLOCK
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
pand %xmm3, %xmm1
PCLMULQDQ 0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
- pextrd $0x01, %xmm1, %eax
+ PEXTRD 0x01, %xmm1, %eax
ret
ENDPROC(crc32_pclmul_le_16)
_INP_END_SIZE = 8
_INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
_XMM_SAVE_SIZE = 0
_INP_END = 0
_INP_END_SIZE = 8
_INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
_XMM_SAVE_SIZE = 0
_INP_END = 0
#define REG_NUM_INVALID 100
-#define REG_TYPE_R64 0
-#define REG_TYPE_XMM 1
+#define REG_TYPE_R32 0
+#define REG_TYPE_R64 1
+#define REG_TYPE_XMM 2
#define REG_TYPE_INVALID 100
+ .macro R32_NUM opd r32
+ \opd = REG_NUM_INVALID
+ .ifc \r32,%eax
+ \opd = 0
+ .endif
+ .ifc \r32,%ecx
+ \opd = 1
+ .endif
+ .ifc \r32,%edx
+ \opd = 2
+ .endif
+ .ifc \r32,%ebx
+ \opd = 3
+ .endif
+ .ifc \r32,%esp
+ \opd = 4
+ .endif
+ .ifc \r32,%ebp
+ \opd = 5
+ .endif
+ .ifc \r32,%esi
+ \opd = 6
+ .endif
+ .ifc \r32,%edi
+ \opd = 7
+ .endif
+#ifdef CONFIG_X86_64
+ .ifc \r32,%r8d
+ \opd = 8
+ .endif
+ .ifc \r32,%r9d
+ \opd = 9
+ .endif
+ .ifc \r32,%r10d
+ \opd = 10
+ .endif
+ .ifc \r32,%r11d
+ \opd = 11
+ .endif
+ .ifc \r32,%r12d
+ \opd = 12
+ .endif
+ .ifc \r32,%r13d
+ \opd = 13
+ .endif
+ .ifc \r32,%r14d
+ \opd = 14
+ .endif
+ .ifc \r32,%r15d
+ \opd = 15
+ .endif
+#endif
+ .endm
+
.macro R64_NUM opd r64
\opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
.ifc \r64,%rax
\opd = 0
.endif
.ifc \r64,%r15
\opd = 15
.endif
+#endif
.endm
.macro XMM_NUM opd xmm
.endm
.macro REG_TYPE type reg
+ R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
+ .elseif reg_type_r32 <> REG_NUM_INVALID
+ \type = REG_TYPE_R32
.elseif reg_type_xmm <> REG_NUM_INVALID
\type = REG_TYPE_XMM
.else
.byte \imm8
.endm
+ .macro PEXTRD imm8 xmm gpr
+ R32_NUM extrd_opd1 \gpr
+ XMM_NUM extrd_opd2 \xmm
+ PFX_OPD_SIZE
+ PFX_REX extrd_opd1 extrd_opd2
+ .byte 0x0f, 0x3a, 0x16
+ MODRM 0xc0 extrd_opd1 extrd_opd2
+ .byte \imm8
+ .endm
+
.macro AESKEYGENASSIST rcon xmm1 xmm2
XMM_NUM aeskeygen_opd1 \xmm1
XMM_NUM aeskeygen_opd2 \xmm2
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
movq %rdi, %rax
shrq $PUD_SHIFT, %rax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, (4096+0)(%rbx,%rax,8)
- movq %rdx, (4096+8)(%rbx,%rax,8)
+ movq %rdx, 4096(%rbx,%rax,8)
+ incl %eax
+ andl $(PTRS_PER_PUD-1), %eax
+ movq %rdx, 4096(%rbx,%rax,8)
addq $8192, %rbx
movq %rdi, %rax
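
The apparent point of the change above is that the second early PUD store now
uses an index that is incremented and masked with PTRS_PER_PUD-1, so it wraps
to slot 0 instead of being written one slot past the end of the table when the
address falls in the last slot. A small sketch of that index arithmetic with a
hypothetical index:

	#include <stdio.h>

	#define PTRS_PER_PUD 512

	int main(void)
	{
		/* Hypothetical: the address to map falls in the last PUD slot. */
		unsigned int idx = 511;
		unsigned int second = (idx + 1) & (PTRS_PER_PUD - 1);

		/* Writing slots 511 and 512 would run off the end of the
		 * table; with the wrap the second store goes to slot 0. */
		printf("slots written: %u and %u\n", idx, second);
		return 0;
	}
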
/*
* Were we in an interrupt that interrupted kernel mode?
*
- * For now, with eagerfpu we will return interrupted kernel FPU
- * state as not-idle. TBD: Ideally we can change the return value
- * to something like __thread_has_fpu(current). But we need to
- * be careful of doing __thread_clear_has_fpu() before saving
- * the FPU etc for supporting nested uses etc. For now, take
- * the simple route!
- *
* On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
* pair does nothing at all: the thread must not have fpu (so
* that we don't try to save the FPU state), and TS must
* be set (so that the clts/stts pair does nothing that is
* visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case, where we return 1 unless the state has
+ * already been saved by kernel_fpu_begin().
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
if (use_eager_fpu())
- return 0;
+ return __thread_has_fpu(current);
return !__thread_has_fpu(current) &&
(read_cr0() & X86_CR0_TS);
struct task_struct *me = current;
if (__thread_has_fpu(me)) {
- __save_init_fpu(me);
__thread_clear_has_fpu(me);
+ __save_init_fpu(me);
/* We do 'stts()' in __kernel_fpu_end() */
} else if (!use_eager_fpu()) {
this_cpu_write(fpu_owner_task, NULL);
#endif
#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
* Save this mc into mc_saved_data. So it will be loaded early when a CPU is
* hot added or resumes.
* Hold hotplug lock so mc_saved_data is not accessed by a CPU in
* hotplug.
*/
- cpu_hotplug_driver_lock();
+ mutex_lock(&x86_cpu_microcode_mutex);
mc_saved_count_init = mc_saved_data.mc_saved_count;
mc_saved_count = mc_saved_data.mc_saved_count;
}
out:
- cpu_hotplug_driver_unlock();
+ mutex_unlock(&x86_cpu_microcode_mutex);
return ret;
}
{
if (cpuidle_idle_call())
x86_idle();
+ else
+ local_irq_enable();
}
/*
*/
static void amd_e400_idle(void)
{
- if (need_resched())
- return;
-
if (!amd_e400_c1e_detected) {
u32 lo, hi;
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3) {
+ int highbyte_regs = ctxt->rex_prefix == 0;
+
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
- op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
+ op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+ highbyte_regs && (ctxt->d & ByteOp));
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM), N, N,
/* 0x10 - 0x1F */
- N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N,
+ D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
+ case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
{
struct kvm_lapic *apic = vcpu->arch.apic;
unsigned int sipi_vector;
+ unsigned long pe;
- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
return;
- if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
+ pe = xchg(&apic->pending_events, 0);
+
+ if (test_bit(KVM_APIC_INIT, &pe)) {
kvm_lapic_reset(vcpu);
kvm_vcpu_reset(vcpu);
if (kvm_vcpu_is_bsp(apic->vcpu))
else
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
}
- if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
+ if (test_bit(KVM_APIC_SIPI, &pe) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
smp_rmb();
}
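
Taking the whole pending_events word with a single xchg() gives one consistent
snapshot of the INIT and SIPI bits, instead of clearing them with two separate
test_and_clear_bit() calls that a newly posted event could interleave with. A
minimal userspace analogue, with the GCC atomic builtin standing in for the
kernel's xchg() and the bit numbers made up:

	#include <stdio.h>

	#define INIT_BIT	0	/* stand-ins for KVM_APIC_INIT/_SIPI */
	#define SIPI_BIT	1

	int main(void)
	{
		/* Hypothetical word with both events pending. */
		unsigned long pending_events = (1UL << INIT_BIT) | (1UL << SIPI_BIT);
		unsigned long pe;

		/* Snapshot and clear in one atomic step, as in the patch above. */
		pe = __atomic_exchange_n(&pending_events, 0, __ATOMIC_SEQ_CST);

		if (pe & (1UL << INIT_BIT))
			printf("INIT pending\n");
		if (pe & (1UL << SIPI_BIT))
			printf("SIPI pending\n");
		return 0;
	}
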
/*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with a big range.
+ * That range may have holes in the middle or at the ends, and only the
+ * RAM parts will be mapped in init_range_memory_mapping().
*/
static unsigned long __init init_range_memory_mapping(
unsigned long r_start,
max_pfn_mapped = 0; /* will get exact value next */
min_pfn_mapped = real_end >> PAGE_SHIFT;
last_start = start = real_end;
+
+ /*
+ * We start from the top (end of memory) and go to the bottom.
+ * memblock_find_in_range() gets us a block of RAM from the end of
+ * RAM in [min_pfn_mapped, max_pfn_mapped) that is used as new pages
+ * for the page tables.
+ */
while (last_start > ISA_END_ADDRESS) {
if (last_start > step_size) {
start = round_down(last_start - 1, step_size);
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- data = phys_to_virt(pa_data);
+ data = ioremap(pa_data, sizeof(*rom));
+ if (!data)
+ return -ENOMEM;
if (data->type == SETUP_PCI) {
rom = (struct pci_setup_rom *)data;
}
}
pa_data = data->next;
+ iounmap(data);
}
return 0;
}
*/
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
- if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
- || devfn == PCI_DEVFN(0, 0)
- || devfn == PCI_DEVFN(3, 0)))
- return 1;
-
/* This is a workaround for A0 LNC bug where PCI status register does
* not have new CAP bit set. can not be written by SW either.
*
*/
if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
return 0;
-
+ if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+ || devfn == PCI_DEVFN(0, 0)
+ || devfn == PCI_DEVFN(3, 0)))
+ return 1;
return 0; /* langwell on others */
}
}
if (boot_used_size && !finished) {
- unsigned long size;
+ unsigned long size = 0;
u32 attr;
efi_status_t s;
void *tmp;
{
unsigned cpu;
unsigned int this_cpu = smp_processor_id();
+ int xen_vector = xen_map_vector(vector);
- if (!(num_online_cpus() > 1))
+ if (!(num_online_cpus() > 1) || (xen_vector < 0))
return;
for_each_cpu_and(cpu, mask, cpu_online_mask) {
if (this_cpu == cpu)
continue;
- xen_smp_send_call_function_single_ipi(cpu);
+ xen_send_IPI_one(cpu, xen_vector);
}
}
void xen_send_IPI_allbutself(int vector)
{
- int xen_vector = xen_map_vector(vector);
-
- if (xen_vector >= 0)
- xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+ xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
int vector);
extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);
# Power management related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_PM) += device_pm.o
+acpi-y += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
-acpi-y += csrt.o
acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o
acpi-y += acpi_platform.o
acpi-y += power.o
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+static int ac_sleep_before_get_state_ms;
+
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
+ /*
+ * A buggy BIOS may notify AC first and then sleep for
+ * a specific time before doing actual operations in the
+ * EC event handler (_Qxx). This will cause the AC state
+ * reported by the ACPI event to be incorrect, so wait for a
+ * specific time for the EC event handler to make progress.
+ */
+ if (ac_sleep_before_get_state_ms > 0)
+ msleep(ac_sleep_before_get_state_ms);
+
acpi_ac_get_state(ac);
acpi_bus_generate_proc_event(device, event, (u32) ac->state);
acpi_bus_generate_netlink_event(device->pnp.device_class,
return;
}
+static int thinkpad_e530_quirk(const struct dmi_system_id *d)
+{
+ ac_sleep_before_get_state_ms = 1000;
+ return 0;
+}
+
+static struct dmi_system_id ac_dmi_table[] = {
+ {
+ .callback = thinkpad_e530_quirk,
+ .ident = "thinkpad e530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+ },
+ },
+ {},
+};
+
static int acpi_ac_add(struct acpi_device *device)
{
int result = 0;
kfree(ac);
}
+ dmi_check_system(ac_dmi_table);
return result;
}
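
dmi_check_system() walks the table and runs the callback of entries whose
DMI_MATCH fields all match the running machine; here the callback only sets
the module-level delay used before reading the AC state. A small userspace
sketch of the same table-plus-callback pattern, with the pretend "DMI" values
hard-coded:

	#include <stdio.h>
	#include <string.h>

	struct quirk {
		const char *vendor;
		const char *product;
		void (*callback)(void);
	};

	static int ac_delay_ms;

	static void set_ac_delay(void)
	{
		ac_delay_ms = 1000;
	}

	static const struct quirk quirks[] = {
		{ "LENOVO", "32597CG", set_ac_delay },
		{ NULL, NULL, NULL },			/* terminator */
	};

	int main(void)
	{
		const char *vendor = "LENOVO", *product = "32597CG";
		const struct quirk *q;

		for (q = quirks; q->vendor; q++)
			if (!strcmp(q->vendor, vendor) &&
			    !strcmp(q->product, product))
				q->callback();

		printf("ac delay = %d ms\n", ac_delay_ms);	/* 1000 here */
		return 0;
	}
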
struct lpss_device_desc {
bool clk_required;
- const char *clk_parent;
+ const char *clkdev_name;
bool ltr_required;
unsigned int prv_offset;
};
+static struct lpss_device_desc lpss_dma_desc = {
+ .clk_required = true,
+ .clkdev_name = "hclk",
+};
+
struct lpss_private_data {
void __iomem *mmio_base;
resource_size_t mmio_size;
static struct lpss_device_desc lpt_dev_desc = {
.clk_required = true,
- .clk_parent = "lpss_clk",
.prv_offset = 0x800,
.ltr_required = true,
};
};
static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ /* Generic LPSS devices */
+ { "INTL9C60", (unsigned long)&lpss_dma_desc },
+
/* Lynxpoint LPSS devices */
{ "INT33C0", (unsigned long)&lpt_dev_desc },
{ "INT33C1", (unsigned long)&lpt_dev_desc },
struct lpss_private_data *pdata)
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+ struct lpss_clk_data *clk_data;
if (!lpss_clk_dev)
lpt_register_clock_device();
- if (!dev_desc->clk_parent || !pdata->mmio_base
+ clk_data = platform_get_drvdata(lpss_clk_dev);
+ if (!clk_data)
+ return -ENODEV;
+
+ if (dev_desc->clkdev_name) {
+ clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
+ dev_name(&adev->dev));
+ return 0;
+ }
+
+ if (!pdata->mmio_base
|| pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
return -ENODATA;
pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
- dev_desc->clk_parent, 0,
+ clk_data->name, 0,
pdata->mmio_base + dev_desc->prv_offset,
0, 0, NULL);
if (IS_ERR(pdata->clk))
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
const struct acpi_hest_generic_data *gdata)
{
-#ifdef CONFIG_ACPI_APEI_PCIEAER
- struct pci_dev *dev;
-#endif
-
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
-#ifdef CONFIG_ACPI_APEI_PCIEAER
- dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
- pcie->device_id.bus, pcie->device_id.function);
- if (!dev) {
- pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
- pcie->device_id.segment, pcie->device_id.bus,
- pcie->device_id.slot, pcie->device_id.function);
- return;
- }
- if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
- cper_print_aer(pfx, dev, gdata->error_severity,
- (struct aer_capability_regs *) pcie->aer_info);
- pci_dev_put(dev);
-#endif
}
static const char *apei_estatus_section_flag_strs[] = {
aer_severity = cper_severity_to_aer(sev);
aer_recover_queue(pcie_err->device_id.segment,
pcie_err->device_id.bus,
- devfn, aer_severity);
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
}
}
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
/* External interrupt vector is GSI */
- if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+ rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
+ if (rc) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
}
- if (request_irq(ghes->irq, ghes_irq_func,
- 0, "GHES IRQ", ghes)) {
+ rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
+++ /dev/null
-/*
- * Support for Core System Resources Table (CSRT)
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "ACPI: CSRT: " fmt
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sizes.h>
-
-ACPI_MODULE_NAME("CSRT");
-
-static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
- const struct acpi_csrt_group *grp)
-{
- const struct acpi_csrt_shared_info *si;
- struct resource res[3];
- size_t nres;
- int ret;
-
- memset(res, 0, sizeof(res));
- nres = 0;
-
- si = (const struct acpi_csrt_shared_info *)&grp[1];
- /*
- * The peripherals that are listed on CSRT typically support only
- * 32-bit addresses so we only use the low part of MMIO base for
- * now.
- */
- if (!si->mmio_base_high && si->mmio_base_low) {
- /*
- * There is no size of the memory resource in shared_info
- * so we assume that it is 4k here.
- */
- res[nres].start = si->mmio_base_low;
- res[nres].end = res[0].start + SZ_4K - 1;
- res[nres++].flags = IORESOURCE_MEM;
- }
-
- if (si->gsi_interrupt) {
- int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
- si->interrupt_mode,
- si->interrupt_polarity);
- res[nres].start = irq;
- res[nres].end = irq;
- res[nres++].flags = IORESOURCE_IRQ;
- }
-
- if (si->base_request_line || si->num_handshake_signals) {
- /*
- * We pass the driver a DMA resource describing the range
- * of request lines the device supports.
- */
- res[nres].start = si->base_request_line;
- res[nres].end = res[nres].start + si->num_handshake_signals - 1;
- res[nres++].flags = IORESOURCE_DMA;
- }
-
- ret = platform_device_add_resources(pdev, res, nres);
- if (ret) {
- if (si->gsi_interrupt)
- acpi_unregister_gsi(si->gsi_interrupt);
- return ret;
- }
-
- return 0;
-}
-
-static int __init
-acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
-{
- struct platform_device *pdev;
- char vendor[5], name[16];
- int ret, i;
-
- vendor[0] = grp->vendor_id;
- vendor[1] = grp->vendor_id >> 8;
- vendor[2] = grp->vendor_id >> 16;
- vendor[3] = grp->vendor_id >> 24;
- vendor[4] = '\0';
-
- if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
- return -ENODEV;
-
- snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
- pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- /* Add resources based on the shared info */
- ret = acpi_csrt_parse_shared_info(pdev, grp);
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
-
- for (i = 0; i < pdev->num_resources; i++)
- dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
-
- return 0;
-
-fail:
- platform_device_put(pdev);
- return ret;
-}
-
-/*
- * CSRT or Core System Resources Table is a proprietary ACPI table
- * introduced by Microsoft. This table can contain devices that are not in
- * the system DSDT table. In particular DMA controllers might be described
- * here.
- *
- * We present these devices as normal platform devices that don't have ACPI
- * IDs or handle. The platform device name will be something like
- * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
- */
-void __init acpi_csrt_init(void)
-{
- struct acpi_csrt_group *grp, *end;
- struct acpi_table_csrt *csrt;
- acpi_status status;
- int ret;
-
- status = acpi_get_table(ACPI_SIG_CSRT, 0,
- (struct acpi_table_header **)&csrt);
- if (ACPI_FAILURE(status)) {
- if (status != AE_NOT_FOUND)
- pr_warn("failed to get the CSRT table\n");
- return;
- }
-
- pr_debug("parsing CSRT table for devices\n");
-
- grp = (struct acpi_csrt_group *)(csrt + 1);
- end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
-
- while (grp < end) {
- ret = acpi_csrt_parse_resource_group(grp);
- if (ret) {
- pr_warn("error in parsing resource group: %d\n", ret);
- return;
- }
-
- grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
- }
-}
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("device_pm");
-static DEFINE_MUTEX(acpi_pm_notifier_lock);
-
-/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
- *
- * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
- * PM wakeup events. For example, wakeup events may be generated for bridges
- * if one of the devices below the bridge is signaling wakeup, even if the
- * bridge itself doesn't have a wakeup GPE associated with it.
- */
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler, void *context)
-{
- acpi_status status = AE_ALREADY_EXISTS;
-
- mutex_lock(&acpi_pm_notifier_lock);
-
- if (adev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_install_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
- if (ACPI_FAILURE(status))
- goto out;
-
- adev->wakeup.flags.notifier_present = true;
-
- out:
- mutex_unlock(&acpi_pm_notifier_lock);
- return status;
-}
-
-/**
- * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @adev: ACPI device to remove the notifier from.
- */
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler)
-{
- acpi_status status = AE_BAD_PARAMETER;
-
- mutex_lock(&acpi_pm_notifier_lock);
-
- if (!adev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_remove_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler);
- if (ACPI_FAILURE(status))
- goto out;
-
- adev->wakeup.flags.notifier_present = false;
-
- out:
- mutex_unlock(&acpi_pm_notifier_lock);
- return status;
-}
-
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
if (result)
return result;
} else if (state == ACPI_STATE_UNKNOWN) {
- /* No power resources and missing _PSC? Try to force D0. */
+ /*
+ * No power resources and missing _PSC? Cross fingers and make
+ * it D0 in hope that this is what the BIOS put the device into.
+ * [We tried to force D0 here by executing _PS0, but that broke
+ * Toshiba P870-303 in a nasty way.]
+ */
state = ACPI_STATE_D0;
- result = acpi_dev_pm_explicit_set(device, state);
- if (result)
- return result;
}
device->power.state = state;
return 0;
}
EXPORT_SYMBOL(acpi_bus_power_manageable);
+#ifdef CONFIG_PM
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events. For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler, void *context)
+{
+ acpi_status status = AE_ALREADY_EXISTS;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler, context);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = true;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (!adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_remove_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = false;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
bool acpi_bus_can_wakeup(acpi_handle handle)
{
struct acpi_device *device;
mutex_unlock(&adev->physical_node_lock);
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
+#endif /* CONFIG_PM */
static int ec_poll(struct acpi_ec *ec)
{
unsigned long flags;
- int repeat = 2; /* number of command restarts */
+ int repeat = 5; /* number of command restarts */
while (repeat--) {
unsigned long delay = jiffies +
msecs_to_jiffies(ec_delay);
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
- break;
pr_debug(PREFIX "controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
void acpi_pci_root_hp_init(void);
void acpi_platform_init(void);
int acpi_sysfs_init(void);
-void acpi_csrt_init(void);
#ifdef CONFIG_ACPI_CONTAINER
void acpi_container_init(void);
#else
/* bus enumerate */
printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
(char *)buffer.pointer);
- if (!root)
+ if (root)
+ acpiphp_check_host_bridge(handle);
+ else
handle_root_bridge_insertion(handle);
break;
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
-static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
- acpi_processor_suspend, acpi_processor_resume);
-
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
.remove = acpi_processor_remove,
.notify = acpi_processor_notify,
},
- .drv.pm = &acpi_processor_pm,
};
#define INSTALL_NOTIFY_HANDLER 1
if (result < 0)
return result;
+ acpi_processor_syscore_init();
+
acpi_processor_install_hotplug_notify();
acpi_thermal_cpufreq_init();
acpi_processor_uninstall_hotplug_notify();
+ acpi_processor_syscore_exit();
+
acpi_bus_unregister_driver(&acpi_processor_driver);
return;
#include <linux/sched.h> /* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
+#include <linux/syscore_ops.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
#endif
+#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
-static void acpi_idle_bm_rld_save(void)
+int acpi_processor_suspend(void)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+ return 0;
}
-static void acpi_idle_bm_rld_restore(void)
+
+void acpi_processor_resume(void)
{
u32 resumed_bm_rld;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+ if (resumed_bm_rld == saved_bm_rld)
+ return;
- if (resumed_bm_rld != saved_bm_rld)
- acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-int acpi_processor_suspend(struct device *dev)
+static struct syscore_ops acpi_processor_syscore_ops = {
+ .suspend = acpi_processor_suspend,
+ .resume = acpi_processor_resume,
+};
+
+void acpi_processor_syscore_init(void)
{
- acpi_idle_bm_rld_save();
- return 0;
+ register_syscore_ops(&acpi_processor_syscore_ops);
}
-int acpi_processor_resume(struct device *dev)
+void acpi_processor_syscore_exit(void)
{
- acpi_idle_bm_rld_restore();
- return 0;
+ unregister_syscore_ops(&acpi_processor_syscore_ops);
}
+#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
acpi_set_pnp_ids(handle, &pnp, type);
if (!pnp.type.hardware_id)
- return;
+ goto out;
/*
* This relies on the fact that acpi_install_notify_handler() will not
}
}
+out:
acpi_free_pnp_ids(&pnp);
}
acpi_pci_link_init();
acpi_platform_init();
acpi_lpss_init();
- acpi_csrt_init();
acpi_container_init();
acpi_memory_hotplug_init();
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion g6 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP 1000 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion m4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+ },
+ },
{}
};
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Asus UL30A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+ },
+ },
{ },
};
/*
* acard-ahci.c - ACard AHCI SATA support
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* ahci.c - AHCI SATA support
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
/*
* ahci.h - Common AHCI SATA definitions and declarations
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* ata_piix.c - Intel PATA/SATA controllers
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
ich8_2port_sata_snb,
+ ich8_2port_sata_byt,
};
struct piix_map_db {
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
{ } /* terminate list */
};
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
[ich8_2port_sata_snb] = &ich8_2port_map_db,
+ [ich8_2port_sata_byt] = &ich8_2port_map_db,
};
static struct pci_bits piix_enable_bits[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
+
+ [ich8_2port_sata_byt] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
};
#define AHCI_PCI_BAR 5
/*
* libahci.c - Common AHCI SATA low-level routines
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* libata-core.c - helper library for ATA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+
+ /* some SATA bridges need us to indicate data xfer direction */
+ if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
+ dma_dir == DMA_FROM_DEVICE)
+ qc->tf.feature |= ATAPI_DMADIR;
+
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
/*
* libata-eh.c - libata error handling
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* libata-scsi.c - helper library for ATA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* libata-sff.c - helper library for PCI IDE BMDMA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- err = -ENXIO;
- goto err_rel_gpio;
- }
-
ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(ide_base)) {
err = PTR_ERR(ide_base);
/*
* pdc_adma.c - Pacific Digital Corporation ADMA
*
- * Maintained by: Mark Lord <mlord@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
*
* Copyright 2005 Mark Lord
*
/*
* sata_promise.c - Promise SATA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Mikael Pettersson <mikpe@it.uu.se>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
/* start host DMA transaction */
dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl &= ~ATAPI_CONTROL1_STOP;
dmactl |= ATAPI_CONTROL1_START;
iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
}
.bmdma_status = sata_rcar_bmdma_status,
};
-static int sata_rcar_serr_interrupt(struct ata_port *ap)
+static void sata_rcar_serr_interrupt(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
- int handled = 0;
u32 serror;
serror = ioread32(priv->base + SCRSERR_REG);
if (!serror)
- return 0;
+ return;
DPRINTK("SError @host_intr: 0x%x\n", serror);
ata_ehi_push_desc(ehi, "%s", "hotplug");
freeze = serror & SERR_COMM_WAKE ? 0 : 1;
- handled = 1;
}
/* freeze or abort */
ata_port_freeze(ap);
else
ata_port_abort(ap);
-
- return handled;
}
-static int sata_rcar_ata_interrupt(struct ata_port *ap)
+static void sata_rcar_ata_interrupt(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
int handled = 0;
if (qc)
handled |= ata_bmdma_port_intr(ap, qc);
- return handled;
+ /* be sure to clear ATA interrupt */
+ if (!handled)
+ sata_rcar_check_status(ap);
}
static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+ sataintstat &= SATA_RCAR_INT_MASK;
if (!sataintstat)
goto done;
/* ack */
- iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
- priv->base + SATAINTSTAT_REG);
+ iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG);
ap = host->ports[0];
if (sataintstat & SATAINTSTAT_ATA)
- handled |= sata_rcar_ata_interrupt(ap);
+ sata_rcar_ata_interrupt(ap);
if (sataintstat & SATAINTSTAT_SERR)
- handled |= sata_rcar_serr_interrupt(ap);
+ sata_rcar_serr_interrupt(ap);
+ handled = 1;
done:
spin_unlock_irqrestore(&host->lock, flags);
/*
* sata_sil.c - Silicon Image SATA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* sata_sx4.c - Promise SATA
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
/*
* sata_via.c - VIA Serial ATA controllers
*
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
return subsys_register(subsys, groups, virtual_dir);
}
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
int __init buses_init(void)
{
if (dev) {
WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
- "Write permission without 'store'\n");
+ "Attribute %s: write permission without 'store'\n",
+ attr->attr.name);
WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
- "Read permission without 'show'\n");
+ "Attribute %s: read permission without 'show'\n",
+ attr->attr.name);
error = sysfs_create_file(&dev->kobj, &attr->attr);
}
int dev_pm_put_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
- int ret = 0;
+ int ret = 1;
spin_lock_irq(&dev->power.lock);
psd = dev_to_psd(dev);
- if (!psd) {
- ret = -EINVAL;
+ if (!psd)
goto out;
- }
if (--psd->refcount == 0) {
dev->power.subsys_data = NULL;
- kfree(psd);
- ret = 1;
+ } else {
+ psd = NULL;
+ ret = 0;
}
out:
spin_unlock_irq(&dev->power.lock);
+ kfree(psd);
return ret;
}
{ BCMA_CORE_I2S, "I2S" },
{ BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" },
{ BCMA_CORE_SHIM, "SHIM" },
+ { BCMA_CORE_PCIE2, "PCIe Gen2" },
+ { BCMA_CORE_ARM_CR4, "ARM CR4" },
{ BCMA_CORE_DEFAULT, "Default" },
};
spin_lock(&brd->brd_lock);
idx = sector >> PAGE_SECTORS_SHIFT;
+ page->index = idx;
if (radix_tree_insert(&brd->brd_pages, idx, page)) {
__free_page(page);
page = radix_tree_lookup(&brd->brd_pages, idx);
BUG_ON(!page);
BUG_ON(page->index != idx);
- } else
- page->index = idx;
+ }
spin_unlock(&brd->brd_lock);
radix_tree_preload_end();
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
+/*
+ * Increment the given counter and return its updated value.
+ * If the counter is already 0, it will not be incremented.
+ * If the counter is already at its maximum value, -EINVAL is
+ * returned without updating it.
+ */
+static int atomic_inc_return_safe(atomic_t *v)
+{
+ unsigned int counter;
+
+ counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+ if (counter <= (unsigned int)INT_MAX)
+ return (int)counter;
+
+ atomic_dec(v);
+
+ return -EINVAL;
+}
+
+/* Decrement the counter. Return the resulting value, or -EINVAL */
+static int atomic_dec_return_safe(atomic_t *v)
+{
+ int counter;
+
+ counter = atomic_dec_return(v);
+ if (counter >= 0)
+ return counter;
+
+ atomic_inc(v);
+
+ return -EINVAL;
+}
+
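
A userspace sketch of what the two helpers above guard against: an increment
that would resurrect a counter already at 0 or push it past INT_MAX, and a
decrement that would take it below 0. Plain ints here, no atomicity, and the
exact return values of the kernel helpers differ slightly:

	#include <limits.h>
	#include <stdio.h>

	static int inc_return_safe(int *v)
	{
		if (*v == 0)		/* already torn down: leave it at 0 */
			return 0;
		if (*v == INT_MAX)	/* refuse to overflow */
			return -1;
		return ++*v;
	}

	static int dec_return_safe(int *v)
	{
		if (*v == 0)		/* refuse to underflow */
			return -1;
		return --*v;
	}

	int main(void)
	{
		int ref = 1;

		printf("%d\n", inc_return_safe(&ref));	/* 2 */
		printf("%d\n", dec_return_safe(&ref));	/* 1 */
		printf("%d\n", dec_return_safe(&ref));	/* 0 */
		printf("%d\n", inc_return_safe(&ref));	/* stays 0 */
		return 0;
	}
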
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
* block device image metadata (in-memory version)
*/
struct rbd_image_header {
- /* These four fields never change for a given rbd image */
+ /* These six fields never change for a given rbd image */
char *object_prefix;
- u64 features;
__u8 obj_order;
__u8 crypt_type;
__u8 comp_type;
+ u64 stripe_unit;
+ u64 stripe_count;
+ u64 features; /* Might be changeable someday? */
/* The remaining fields need to be updated occasionally */
u64 image_size;
struct ceph_snap_context *snapc;
- char *snap_names;
- u64 *snap_sizes;
-
- u64 stripe_unit;
- u64 stripe_count;
+ char *snap_names; /* format 1 only */
+ u64 *snap_sizes; /* format 1 only */
};
/*
};
};
struct page **copyup_pages;
+ u32 copyup_page_count;
struct ceph_osd_request *osd_req;
struct rbd_obj_request *obj_request; /* obj req initiator */
};
struct page **copyup_pages;
+ u32 copyup_page_count;
spinlock_t completion_lock;/* protects next_completion */
u32 next_completion;
rbd_img_callback_t callback;
struct rbd_spec *parent_spec;
u64 parent_overlap;
+ atomic_t parent_ref;
struct rbd_device *parent;
/* protects updating the header */
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static void rbd_spec_put(struct rbd_spec *spec);
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
}
/*
- * Create a new header structure, translate header format from the on-disk
- * header.
+ * Fill an rbd image header with information from the given format 1
+ * on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_image_header *header,
+static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
{
+ struct rbd_image_header *header = &rbd_dev->header;
+ bool first_time = header->object_prefix == NULL;
+ struct ceph_snap_context *snapc;
+ char *object_prefix = NULL;
+ char *snap_names = NULL;
+ u64 *snap_sizes = NULL;
u32 snap_count;
- size_t len;
size_t size;
+ int ret = -ENOMEM;
u32 i;
- memset(header, 0, sizeof (*header));
+ /* Allocate this now to avoid having to handle failure below */
- snap_count = le32_to_cpu(ondisk->snap_count);
+ if (first_time) {
+ size_t len;
- len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
- header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
- if (!header->object_prefix)
- return -ENOMEM;
- memcpy(header->object_prefix, ondisk->object_prefix, len);
- header->object_prefix[len] = '\0';
+ len = strnlen(ondisk->object_prefix,
+ sizeof (ondisk->object_prefix));
+ object_prefix = kmalloc(len + 1, GFP_KERNEL);
+ if (!object_prefix)
+ return -ENOMEM;
+ memcpy(object_prefix, ondisk->object_prefix, len);
+ object_prefix[len] = '\0';
+ }
+ /* Allocate the snapshot context and fill it in */
+
+ snap_count = le32_to_cpu(ondisk->snap_count);
+ snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
+ if (!snapc)
+ goto out_err;
+ snapc->seq = le64_to_cpu(ondisk->snap_seq);
if (snap_count) {
+ struct rbd_image_snap_ondisk *snaps;
u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
- /* Save a copy of the snapshot names */
+ /* We'll keep a copy of the snapshot names... */
- if (snap_names_len > (u64) SIZE_MAX)
- return -EIO;
- header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
- if (!header->snap_names)
+ if (snap_names_len > (u64)SIZE_MAX)
+ goto out_2big;
+ snap_names = kmalloc(snap_names_len, GFP_KERNEL);
+ if (!snap_names)
goto out_err;
+
+ /* ...as well as the array of their sizes. */
+
+ size = snap_count * sizeof (*header->snap_sizes);
+ snap_sizes = kmalloc(size, GFP_KERNEL);
+ if (!snap_sizes)
+ goto out_err;
+
/*
- * Note that rbd_dev_v1_header_read() guarantees
- * the ondisk buffer we're working with has
+ * Copy the names, and fill in each snapshot's id
+ * and size.
+ *
+ * Note that rbd_dev_v1_header_info() guarantees the
+ * ondisk buffer we're working with has
* snap_names_len bytes beyond the end of the
* snapshot id array, this memcpy() is safe.
*/
- memcpy(header->snap_names, &ondisk->snaps[snap_count],
- snap_names_len);
+ memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
+ snaps = ondisk->snaps;
+ for (i = 0; i < snap_count; i++) {
+ snapc->snaps[i] = le64_to_cpu(snaps[i].id);
+ snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
+ }
+ }
- /* Record each snapshot's size */
+ /* We won't fail any more, fill in the header */
- size = snap_count * sizeof (*header->snap_sizes);
- header->snap_sizes = kmalloc(size, GFP_KERNEL);
- if (!header->snap_sizes)
- goto out_err;
- for (i = 0; i < snap_count; i++)
- header->snap_sizes[i] =
- le64_to_cpu(ondisk->snaps[i].image_size);
+ down_write(&rbd_dev->header_rwsem);
+ if (first_time) {
+ header->object_prefix = object_prefix;
+ header->obj_order = ondisk->options.order;
+ header->crypt_type = ondisk->options.crypt_type;
+ header->comp_type = ondisk->options.comp_type;
+ /* The rest aren't used for format 1 images */
+ header->stripe_unit = 0;
+ header->stripe_count = 0;
+ header->features = 0;
} else {
- header->snap_names = NULL;
- header->snap_sizes = NULL;
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_names);
+ kfree(header->snap_sizes);
}
- header->features = 0; /* No features support in v1 images */
- header->obj_order = ondisk->options.order;
- header->crypt_type = ondisk->options.crypt_type;
- header->comp_type = ondisk->options.comp_type;
-
- /* Allocate and fill in the snapshot context */
+ /* The remaining fields always get updated (when we refresh) */
header->image_size = le64_to_cpu(ondisk->image_size);
+ header->snapc = snapc;
+ header->snap_names = snap_names;
+ header->snap_sizes = snap_sizes;
- header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
- if (!header->snapc)
- goto out_err;
- header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
- for (i = 0; i < snap_count; i++)
- header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
+ /* Make sure mapping size is consistent with header info */
- return 0;
+ if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
+ if (rbd_dev->mapping.size != header->image_size)
+ rbd_dev->mapping.size = header->image_size;
+
+ up_write(&rbd_dev->header_rwsem);
+ return 0;
+out_2big:
+ ret = -EIO;
out_err:
- kfree(header->snap_sizes);
- header->snap_sizes = NULL;
- kfree(header->snap_names);
- header->snap_names = NULL;
- kfree(header->object_prefix);
- header->object_prefix = NULL;
+ kfree(snap_sizes);
+ kfree(snap_names);
+ ceph_put_snap_context(snapc);
+ kfree(object_prefix);
- return -ENOMEM;
+ return ret;
}
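
The rewritten rbd_header_from_disk() follows an allocate-first, commit-later
shape: everything that can fail (object prefix, snapshot context, name and
size arrays) is allocated before header_rwsem is taken for writing, so the
section that swaps the new data into the header and frees the old data cannot
fail. A generic userspace sketch of that pattern, with illustrative names
rather than the rbd structures:

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	struct header {
		char *names;
		pthread_rwlock_t lock;
	};

	static struct header h = { NULL, PTHREAD_RWLOCK_INITIALIZER };

	static int header_update(struct header *hdr, const char *new_names)
	{
		char *copy = strdup(new_names);	/* may fail: do it up front */

		if (!copy)
			return -1;

		pthread_rwlock_wrlock(&hdr->lock);	/* no failures past here */
		free(hdr->names);
		hdr->names = copy;
		pthread_rwlock_unlock(&hdr->lock);
		return 0;
	}

	int main(void)
	{
		header_update(&h, "first");
		header_update(&h, "second");	/* old copy freed under the lock */
		free(h.names);
		return 0;
	}
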
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
- const char *snap_name = rbd_dev->spec->snap_name;
- u64 snap_id;
+ u64 snap_id = rbd_dev->spec->snap_id;
u64 size = 0;
u64 features = 0;
int ret;
- if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
- snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
- if (snap_id == CEPH_NOSNAP)
- return -ENOENT;
- } else {
- snap_id = CEPH_NOSNAP;
- }
-
ret = rbd_snap_size(rbd_dev, snap_id, &size);
if (ret)
return ret;
rbd_dev->mapping.size = size;
rbd_dev->mapping.features = features;
- /* If we are mapping a snapshot it must be marked read-only */
-
- if (snap_id != CEPH_NOSNAP)
- rbd_dev->mapping.read_only = true;
-
return 0;
}
{
rbd_dev->mapping.size = 0;
rbd_dev->mapping.features = 0;
- rbd_dev->mapping.read_only = true;
-}
-
-static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
-{
- rbd_dev->mapping.size = 0;
- rbd_dev->mapping.features = 0;
- rbd_dev->mapping.read_only = true;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
-static void rbd_img_request_get(struct rbd_img_request *img_request)
-{
- dout("%s: img %p (was %d)\n", __func__, img_request,
- atomic_read(&img_request->kref.refcount));
- kref_get(&img_request->kref);
-}
-
+static bool img_request_child_test(struct rbd_img_request *img_request);
+static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
rbd_assert(img_request != NULL);
dout("%s: img %p (was %d)\n", __func__, img_request,
atomic_read(&img_request->kref.refcount));
- kref_put(&img_request->kref, rbd_img_request_destroy);
+ if (img_request_child_test(img_request))
+ kref_put(&img_request->kref, rbd_parent_request_destroy);
+ else
+ kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
smp_mb();
}
+static void img_request_child_clear(struct rbd_img_request *img_request)
+{
+ clear_bit(IMG_REQ_CHILD, &img_request->flags);
+ smp_mb();
+}
+
static bool img_request_child_test(struct rbd_img_request *img_request)
{
smp_mb();
smp_mb();
}
+static void img_request_layered_clear(struct rbd_img_request *img_request)
+{
+ clear_bit(IMG_REQ_LAYERED, &img_request->flags);
+ smp_mb();
+}
+
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
smp_mb();
kmem_cache_free(rbd_obj_request_cache, obj_request);
}
+/* It's OK to call this for a device with no parent */
+
+static void rbd_spec_put(struct rbd_spec *spec);
+static void rbd_dev_unparent(struct rbd_device *rbd_dev)
+{
+ rbd_dev_remove_parent(rbd_dev);
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
+}
+
+/*
+ * Parent image reference counting is used to determine when an
+ * image's parent fields can be safely torn down--after there are no
+ * more in-flight requests to the parent image. When the last
+ * reference is dropped, cleaning them up is safe.
+ */
+static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+{
+ int counter;
+
+ if (!rbd_dev->parent_spec)
+ return;
+
+ counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
+ if (counter > 0)
+ return;
+
+ /* Last reference; clean up parent data structures */
+
+ if (!counter)
+ rbd_dev_unparent(rbd_dev);
+ else
+ rbd_warn(rbd_dev, "parent reference underflow\n");
+}
+
+/*
+ * If an image has a non-zero parent overlap, get a reference to its
+ * parent.
+ *
+ * We must get the reference before checking for the overlap to
+ * coordinate properly with zeroing the parent overlap in
+ * rbd_dev_v2_parent_info() when an image gets flattened. We
+ * drop it again if there is no overlap.
+ *
+ * Returns true if the rbd device has a parent with a non-zero
+ * overlap and a reference for it was successfully taken, or
+ * false otherwise.
+ */
+static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+{
+ int counter;
+
+ if (!rbd_dev->parent_spec)
+ return false;
+
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ if (counter > 0 && rbd_dev->parent_overlap)
+ return true;
+
+ /* Image was flattened, but parent is not yet torn down */
+
+ if (counter < 0)
+ rbd_warn(rbd_dev, "parent reference overflow\n");
+
+ return false;
+}
+
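The parent_ref handling above leans on two rbd-local helpers, atomic_inc_return_safe() and atomic_dec_return_safe(), which are not part of this hunk. Purely to illustrate the guarded-counter idea they stand for (not their exact semantics), here is a minimal userspace sketch using C11 atomics and hypothetical names:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Increment, but refuse to wrap past INT_MAX (caller can warn). */
static int safe_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX)
			return -1;
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return old + 1;
}

/* Decrement, but never drop below zero (caller can warn). */
static int safe_dec(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return -1;
	} while (!atomic_compare_exchange_weak(v, &old, old - 1));

	return old - 1;
}

int main(void)
{
	atomic_int parent_ref = 1;	/* taken when the parent is first probed */

	if (safe_inc(&parent_ref) > 0)
		printf("parent pinned for an in-flight request\n");
	if (safe_dec(&parent_ref) > 0)
		printf("request done, parent still pinned\n");
	if (safe_dec(&parent_ref) == 0)
		printf("last reference dropped, tear parent down\n");
	return 0;
}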
/*
* Caller is responsible for filling in the list of object requests
* that comprises the image request, and the Linux request pointer
static struct rbd_img_request *rbd_img_request_create(
struct rbd_device *rbd_dev,
u64 offset, u64 length,
- bool write_request,
- bool child_request)
+ bool write_request)
{
struct rbd_img_request *img_request;
} else {
img_request->snap_id = rbd_dev->spec->snap_id;
}
- if (child_request)
- img_request_child_set(img_request);
- if (rbd_dev->parent_spec)
+ if (rbd_dev_parent_get(rbd_dev))
img_request_layered_set(img_request);
spin_lock_init(&img_request->completion_lock);
img_request->next_completion = 0;
INIT_LIST_HEAD(&img_request->obj_requests);
kref_init(&img_request->kref);
- rbd_img_request_get(img_request); /* Avoid a warning */
- rbd_img_request_put(img_request); /* TEMPORARY */
-
dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
write_request ? "write" : "read", offset, length,
img_request);
rbd_img_obj_request_del(img_request, obj_request);
rbd_assert(img_request->obj_request_count == 0);
+ if (img_request_layered_test(img_request)) {
+ img_request_layered_clear(img_request);
+ rbd_dev_parent_put(img_request->rbd_dev);
+ }
+
if (img_request_write_test(img_request))
ceph_put_snap_context(img_request->snapc);
- if (img_request_child_test(img_request))
- rbd_obj_request_put(img_request->obj_request);
-
kmem_cache_free(rbd_img_request_cache, img_request);
}
+static struct rbd_img_request *rbd_parent_request_create(
+ struct rbd_obj_request *obj_request,
+ u64 img_offset, u64 length)
+{
+ struct rbd_img_request *parent_request;
+ struct rbd_device *rbd_dev;
+
+ rbd_assert(obj_request->img_request);
+ rbd_dev = obj_request->img_request->rbd_dev;
+
+ parent_request = rbd_img_request_create(rbd_dev->parent,
+ img_offset, length, false);
+ if (!parent_request)
+ return NULL;
+
+ img_request_child_set(parent_request);
+ rbd_obj_request_get(obj_request);
+ parent_request->obj_request = obj_request;
+
+ return parent_request;
+}
+
+static void rbd_parent_request_destroy(struct kref *kref)
+{
+ struct rbd_img_request *parent_request;
+ struct rbd_obj_request *orig_request;
+
+ parent_request = container_of(kref, struct rbd_img_request, kref);
+ orig_request = parent_request->obj_request;
+
+ parent_request->obj_request = NULL;
+ rbd_obj_request_put(orig_request);
+ img_request_child_clear(parent_request);
+
+ rbd_img_request_destroy(kref);
+}
+
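rbd_parent_request_destroy() above follows the usual kref idiom: the final put runs a type-specific release, which recovers the containing object with container_of(), drops the state only that type holds, then falls through to the common destroy path. A rough, self-contained userspace analogue (hypothetical names, plain C11 atomics standing in for struct kref):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref {
	atomic_int count;
	void (*release)(struct ref *ref);
};

static void ref_init(struct ref *r, void (*release)(struct ref *))
{
	atomic_init(&r->count, 1);
	r->release = release;
}

static void ref_put(struct ref *r)
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		r->release(r);
}

struct img_request {
	struct ref ref;
	char *copyup_buf;	/* stand-in for child-only state */
};

/* Type-specific teardown, then the common free. */
static void img_request_release(struct ref *r)
{
	struct img_request *img = container_of(r, struct img_request, ref);

	free(img->copyup_buf);
	free(img);
	printf("released\n");
}

int main(void)
{
	struct img_request *img = calloc(1, sizeof(*img));

	ref_init(&img->ref, img_request_release);
	img->copyup_buf = malloc(64);
	ref_put(&img->ref);	/* last put, release runs */
	return 0;
}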
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
- u64 length;
+ struct page **pages;
u32 page_count;
rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
rbd_dev = img_request->rbd_dev;
rbd_assert(rbd_dev);
- length = (u64)1 << rbd_dev->header.obj_order;
- page_count = (u32)calc_pages_for(0, length);
- rbd_assert(obj_request->copyup_pages);
- ceph_release_page_vector(obj_request->copyup_pages, page_count);
+ pages = obj_request->copyup_pages;
+ rbd_assert(pages != NULL);
obj_request->copyup_pages = NULL;
+ page_count = obj_request->copyup_page_count;
+ rbd_assert(page_count);
+ obj_request->copyup_page_count = 0;
+ ceph_release_page_vector(pages, page_count);
/*
* We want the transfer count to reflect the size of the
struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
struct page **pages;
- int result;
- u64 obj_size;
- u64 xferred;
+ u32 page_count;
+ int img_result;
+ u64 parent_length;
+ u64 offset;
+ u64 length;
rbd_assert(img_request_child_test(img_request));
pages = img_request->copyup_pages;
rbd_assert(pages != NULL);
img_request->copyup_pages = NULL;
+ page_count = img_request->copyup_page_count;
+ rbd_assert(page_count);
+ img_request->copyup_page_count = 0;
orig_request = img_request->obj_request;
rbd_assert(orig_request != NULL);
- rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
- result = img_request->result;
- obj_size = img_request->length;
- xferred = img_request->xferred;
+ rbd_assert(obj_request_type_valid(orig_request->type));
+ img_result = img_request->result;
+ parent_length = img_request->length;
+ rbd_assert(parent_length == img_request->xferred);
+ rbd_img_request_put(img_request);
- rbd_dev = img_request->rbd_dev;
+ rbd_assert(orig_request->img_request);
+ rbd_dev = orig_request->img_request->rbd_dev;
rbd_assert(rbd_dev);
- rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
- rbd_img_request_put(img_request);
+ /*
+ * If the overlap has become 0 (most likely because the
+ * image has been flattened) we need to free the pages
+ * and re-submit the original write request.
+ */
+ if (!rbd_dev->parent_overlap) {
+ struct ceph_osd_client *osdc;
- if (result)
- goto out_err;
+ ceph_release_page_vector(pages, page_count);
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ img_result = rbd_obj_request_submit(osdc, orig_request);
+ if (!img_result)
+ return;
+ }
- /* Allocate the new copyup osd request for the original request */
+ if (img_result)
+ goto out_err;
- result = -ENOMEM;
- rbd_assert(!orig_request->osd_req);
+ /*
+	 * The original osd request is of no use to us any more.
+ * We need a new one that can hold the two ops in a copyup
+ * request. Allocate the new copyup osd request for the
+ * original request, and release the old one.
+ */
+ img_result = -ENOMEM;
osd_req = rbd_osd_req_create_copyup(orig_request);
if (!osd_req)
goto out_err;
+ rbd_osd_req_destroy(orig_request->osd_req);
orig_request->osd_req = osd_req;
orig_request->copyup_pages = pages;
+ orig_request->copyup_page_count = page_count;
/* Initialize the copyup op */
osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
- osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+ osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
false, false);
/* Then the original write request op */
+ offset = orig_request->offset;
+ length = orig_request->length;
osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
- orig_request->offset,
- orig_request->length, 0, 0);
- osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
- orig_request->length);
+ offset, length, 0, 0);
+ if (orig_request->type == OBJ_REQUEST_BIO)
+ osd_req_op_extent_osd_data_bio(osd_req, 1,
+ orig_request->bio_list, length);
+ else
+ osd_req_op_extent_osd_data_pages(osd_req, 1,
+ orig_request->pages, length,
+ offset & ~PAGE_MASK, false, false);
rbd_osd_req_format_write(orig_request);
orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
- result = rbd_obj_request_submit(osdc, orig_request);
- if (!result)
+ img_result = rbd_obj_request_submit(osdc, orig_request);
+ if (!img_result)
return;
out_err:
/* Record the error code and complete the request */
- orig_request->result = result;
+ orig_request->result = img_result;
orig_request->xferred = 0;
obj_request_done_set(orig_request);
rbd_obj_request_complete(orig_request);
int result;
rbd_assert(obj_request_img_data_test(obj_request));
- rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ rbd_assert(obj_request_type_valid(obj_request->type));
img_request = obj_request->img_request;
rbd_assert(img_request != NULL);
rbd_dev = img_request->rbd_dev;
rbd_assert(rbd_dev->parent != NULL);
- /*
- * First things first. The original osd request is of no
- * use to use any more, we'll need a new one that can hold
- * the two ops in a copyup request. We'll get that later,
- * but for now we can release the old one.
- */
- rbd_osd_req_destroy(obj_request->osd_req);
- obj_request->osd_req = NULL;
-
/*
* Determine the byte range covered by the object in the
* child image to which the original request was to be sent.
}
result = -ENOMEM;
- parent_request = rbd_img_request_create(rbd_dev->parent,
- img_offset, length,
- false, true);
+ parent_request = rbd_parent_request_create(obj_request,
+ img_offset, length);
if (!parent_request)
goto out_err;
- rbd_obj_request_get(obj_request);
- parent_request->obj_request = obj_request;
result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
if (result)
goto out_err;
parent_request->copyup_pages = pages;
+ parent_request->copyup_page_count = page_count;
parent_request->callback = rbd_img_obj_parent_read_full_callback;
result = rbd_img_request_submit(parent_request);
return 0;
parent_request->copyup_pages = NULL;
+ parent_request->copyup_page_count = 0;
parent_request->obj_request = NULL;
rbd_obj_request_put(obj_request);
out_err:
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
struct rbd_obj_request *orig_request;
+ struct rbd_device *rbd_dev;
int result;
rbd_assert(!obj_request_img_data_test(obj_request));
obj_request->xferred, obj_request->length);
rbd_obj_request_put(obj_request);
- rbd_assert(orig_request);
- rbd_assert(orig_request->img_request);
+ /*
+ * If the overlap has become 0 (most likely because the
+ * image has been flattened) we need to free the pages
+ * and re-submit the original write request.
+ */
+ rbd_dev = orig_request->img_request->rbd_dev;
+ if (!rbd_dev->parent_overlap) {
+ struct ceph_osd_client *osdc;
+
+ rbd_obj_request_put(orig_request);
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ result = rbd_obj_request_submit(osdc, orig_request);
+ if (!result)
+ return;
+ }
/*
* Our only purpose here is to determine whether the object
struct rbd_obj_request *obj_request;
struct rbd_device *rbd_dev;
u64 obj_end;
+ u64 img_xferred;
+ int img_result;
rbd_assert(img_request_child_test(img_request));
+ /* First get what we need from the image request and release it */
+
obj_request = img_request->obj_request;
+ img_xferred = img_request->xferred;
+ img_result = img_request->result;
+ rbd_img_request_put(img_request);
+
+ /*
+ * If the overlap has become 0 (most likely because the
+ * image has been flattened) we need to re-submit the
+ * original request.
+ */
rbd_assert(obj_request);
rbd_assert(obj_request->img_request);
+ rbd_dev = obj_request->img_request->rbd_dev;
+ if (!rbd_dev->parent_overlap) {
+ struct ceph_osd_client *osdc;
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ img_result = rbd_obj_request_submit(osdc, obj_request);
+ if (!img_result)
+ return;
+ }
- obj_request->result = img_request->result;
+ obj_request->result = img_result;
if (obj_request->result)
goto out;
*/
rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
obj_end = obj_request->img_offset + obj_request->length;
- rbd_dev = obj_request->img_request->rbd_dev;
if (obj_end > rbd_dev->parent_overlap) {
u64 xferred = 0;
xferred = rbd_dev->parent_overlap -
obj_request->img_offset;
- obj_request->xferred = min(img_request->xferred, xferred);
+ obj_request->xferred = min(img_xferred, xferred);
} else {
- obj_request->xferred = img_request->xferred;
+ obj_request->xferred = img_xferred;
}
out:
- rbd_img_request_put(img_request);
rbd_img_obj_request_read_callback(obj_request);
rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
- struct rbd_device *rbd_dev;
struct rbd_img_request *img_request;
int result;
rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request->img_request != NULL);
rbd_assert(obj_request->result == (s32) -ENOENT);
- rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ rbd_assert(obj_request_type_valid(obj_request->type));
- rbd_dev = obj_request->img_request->rbd_dev;
- rbd_assert(rbd_dev->parent != NULL);
/* rbd_read_finish(obj_request, obj_request->length); */
- img_request = rbd_img_request_create(rbd_dev->parent,
+ img_request = rbd_parent_request_create(obj_request,
obj_request->img_offset,
- obj_request->length,
- false, true);
+ obj_request->length);
result = -ENOMEM;
if (!img_request)
goto out_err;
- rbd_obj_request_get(obj_request);
- img_request->obj_request = obj_request;
-
- result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
- obj_request->bio_list);
+ if (obj_request->type == OBJ_REQUEST_BIO)
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+ obj_request->bio_list);
+ else
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
+ obj_request->pages);
if (result)
goto out_err;
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ int ret;
if (!rbd_dev)
return;
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
- (void)rbd_dev_refresh(rbd_dev);
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
rbd_obj_notify_ack(rbd_dev, notify_id);
}
* Request sync osd watch/unwatch. The value of "start" determines
* whether a watch request is being initiated or torn down.
*/
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
rbd_dev->watch_request->osd_req);
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, start);
+ rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
rbd_osd_req_format_write(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
goto end_request; /* Shouldn't happen */
}
+ result = -EIO;
+ if (offset + length > rbd_dev->mapping.size) {
+ rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
+ offset, length, rbd_dev->mapping.size);
+ goto end_request;
+ }
+
result = -ENOMEM;
img_request = rbd_img_request_create(rbd_dev, offset, length,
- write_request, false);
+ write_request);
if (!img_request)
goto end_request;
}
/*
- * Read the complete header for the given rbd device.
- *
- * Returns a pointer to a dynamically-allocated buffer containing
- * the complete and validated header. Caller can pass the address
- * of a variable that will be filled in with the version of the
- * header object at the time it was read.
- *
- * Returns a pointer-coded errno if a failure occurs.
+ * Read the complete header for the given rbd device. On successful
+ * return, the rbd_dev->header field will contain up-to-date
+ * information about the image.
*/
-static struct rbd_image_header_ondisk *
-rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
size += names_size;
ondisk = kmalloc(size, GFP_KERNEL);
if (!ondisk)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
0, size, ondisk);
if (ret < 0)
- goto out_err;
+ goto out;
if ((size_t)ret < size) {
ret = -ENXIO;
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
- goto out_err;
+ goto out;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
rbd_warn(rbd_dev, "invalid header");
- goto out_err;
+ goto out;
}
names_size = le64_to_cpu(ondisk->snap_names_len);
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
- return ondisk;
-
-out_err:
- kfree(ondisk);
-
- return ERR_PTR(ret);
-}
-
-/*
- * reload the ondisk the header
- */
-static int rbd_read_header(struct rbd_device *rbd_dev,
- struct rbd_image_header *header)
-{
- struct rbd_image_header_ondisk *ondisk;
- int ret;
-
- ondisk = rbd_dev_v1_header_read(rbd_dev);
- if (IS_ERR(ondisk))
- return PTR_ERR(ondisk);
- ret = rbd_header_from_disk(header, ondisk);
+ ret = rbd_header_from_disk(rbd_dev, ondisk);
+out:
kfree(ondisk);
return ret;
}
-static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
-{
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
- return;
-
- if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
- sector_t size;
-
- rbd_dev->mapping.size = rbd_dev->header.image_size;
- size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long)size);
- set_capacity(rbd_dev->disk, size);
- }
-}
-
-/*
- * only read the first part of the ondisk header, without the snaps info
- */
-static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
-{
- int ret;
- struct rbd_image_header h;
-
- ret = rbd_read_header(rbd_dev, &h);
- if (ret < 0)
- return ret;
-
- down_write(&rbd_dev->header_rwsem);
-
- /* Update image size, and check for resize of mapped image */
- rbd_dev->header.image_size = h.image_size;
- rbd_update_mapping_size(rbd_dev);
-
- /* rbd_dev->header.object_prefix shouldn't change */
- kfree(rbd_dev->header.snap_sizes);
- kfree(rbd_dev->header.snap_names);
- /* osd requests may still refer to snapc */
- ceph_put_snap_context(rbd_dev->header.snapc);
-
- rbd_dev->header.image_size = h.image_size;
- rbd_dev->header.snapc = h.snapc;
- rbd_dev->header.snap_names = h.snap_names;
- rbd_dev->header.snap_sizes = h.snap_sizes;
- /* Free the extra copy of the object prefix */
- if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
- rbd_warn(rbd_dev, "object prefix changed (ignoring)");
- kfree(h.object_prefix);
-
- up_write(&rbd_dev->header_rwsem);
-
- return ret;
-}
-
/*
* Clear the rbd device's EXISTS flag if the snapshot it's mapped to
* has disappeared from the (just updated) snapshot context.
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
- u64 image_size;
+ u64 mapping_size;
int ret;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
- image_size = rbd_dev->header.image_size;
+ mapping_size = rbd_dev->mapping.size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_refresh(rbd_dev);
+ ret = rbd_dev_v1_header_info(rbd_dev);
else
- ret = rbd_dev_v2_refresh(rbd_dev);
+ ret = rbd_dev_v2_header_info(rbd_dev);
/* If it's a mapped snapshot, validate its EXISTS flag */
rbd_exists_validate(rbd_dev);
mutex_unlock(&ctl_mutex);
- if (ret)
- rbd_warn(rbd_dev, "got notification but failed to "
- " update snaps: %d\n", ret);
- if (image_size != rbd_dev->header.image_size)
+ if (mapping_size != rbd_dev->mapping.size) {
+ sector_t size;
+
+ size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long)size);
+ set_capacity(rbd_dev->disk, size);
revalidate_disk(rbd_dev->disk);
+ }
return ret;
}
int ret;
ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
return ret < 0 ? ret : size;
}
spin_lock_init(&rbd_dev->lock);
rbd_dev->flags = 0;
+ atomic_set(&rbd_dev->parent_ref, 0);
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
__le64 snapid;
void *p;
void *end;
+ u64 pool_id;
char *image_id;
u64 overlap;
int ret;
p = reply_buf;
end = reply_buf + ret;
ret = -ERANGE;
- ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
- if (parent_spec->pool_id == CEPH_NOPOOL)
+ ceph_decode_64_safe(&p, end, pool_id, out_err);
+ if (pool_id == CEPH_NOPOOL) {
+ /*
+ * Either the parent never existed, or we have
+			 * a record of it, but the image got flattened so it no
+ * longer has a parent. When the parent of a
+ * layered image disappears we immediately set the
+ * overlap to 0. The effect of this is that all new
+ * requests will be treated as if the image had no
+ * parent.
+ */
+ if (rbd_dev->parent_overlap) {
+ rbd_dev->parent_overlap = 0;
+ smp_mb();
+ rbd_dev_parent_put(rbd_dev);
+ pr_info("%s: clone image has been flattened\n",
+ rbd_dev->disk->disk_name);
+ }
+
goto out; /* No parent? No problem. */
+ }
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
- if (parent_spec->pool_id > (u64)U32_MAX) {
+ if (pool_id > (u64)U32_MAX) {
rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
- (unsigned long long)parent_spec->pool_id, U32_MAX);
+ (unsigned long long)pool_id, U32_MAX);
goto out_err;
}
+ parent_spec->pool_id = pool_id;
image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
ceph_decode_64_safe(&p, end, overlap, out_err);
- rbd_dev->parent_overlap = overlap;
- rbd_dev->parent_spec = parent_spec;
- parent_spec = NULL; /* rbd_dev now owns this */
+ if (overlap) {
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
+ rbd_dev->parent_overlap = overlap;
+ } else {
+ rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
+ }
out:
ret = 0;
out_err:
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
+ ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
return snap_name;
}
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
+ bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
down_write(&rbd_dev->header_rwsem);
+ if (first_time) {
+ ret = rbd_dev_v2_header_onetime(rbd_dev);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * If the image supports layering, get the parent info. We
+ * need to probe the first time regardless. Thereafter we
+ * only need to if there's a parent, to see if it has
+	 * only need to do so if there's a parent, to see if it has
+ */
+ if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
+ (first_time || rbd_dev->parent_spec)) {
+ bool warn;
+
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret)
+ goto out;
+
+ /*
+ * Print a warning if this is the initial probe and
+ * the image has a parent. Don't print it if the
+ * image now being probed is itself a parent. We
+ * can tell at this point because we won't know its
+ * pool name yet (just its pool id).
+ */
+ warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
+ if (first_time && warn)
+ rbd_warn(rbd_dev, "WARNING: kernel layering "
+ "is EXPERIMENTAL!");
+ }
+
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
goto out;
- rbd_update_mapping_size(rbd_dev);
+
+ if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
+ if (rbd_dev->mapping.size != rbd_dev->header.image_size)
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
ret = rbd_dev_v2_snap_context(rbd_dev);
dout("rbd_dev_v2_snap_context returned %d\n", ret);
- if (ret)
- goto out;
out:
up_write(&rbd_dev->header_rwsem);
{
struct rbd_image_header *header;
- rbd_dev_remove_parent(rbd_dev);
- rbd_spec_put(rbd_dev->parent_spec);
- rbd_dev->parent_spec = NULL;
- rbd_dev->parent_overlap = 0;
+ /* Drop parent reference unless it's already been done (or none) */
+
+ if (rbd_dev->parent_overlap)
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
memset(header, 0, sizeof (*header));
}
-static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
int ret;
- /* Populate rbd image metadata */
-
- ret = rbd_read_header(rbd_dev, &rbd_dev->header);
- if (ret < 0)
- goto out_err;
-
- /* Version 1 images have no parent (no layering) */
-
- rbd_dev->parent_spec = NULL;
- rbd_dev->parent_overlap = 0;
-
- dout("discovered version 1 image, header name is %s\n",
- rbd_dev->header_name);
-
- return 0;
-
-out_err:
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
- kfree(rbd_dev->spec->image_id);
- rbd_dev->spec->image_id = NULL;
-
- return ret;
-}
-
-static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
-{
- int ret;
-
- ret = rbd_dev_v2_image_size(rbd_dev);
- if (ret)
- goto out_err;
-
- /* Get the object prefix (a.k.a. block_name) for the image */
-
ret = rbd_dev_v2_object_prefix(rbd_dev);
if (ret)
goto out_err;
- /* Get the and check features for the image */
-
+ /*
+	 * Get and check the features for the image.  Currently the
+ * features are assumed to never change.
+ */
ret = rbd_dev_v2_features(rbd_dev);
if (ret)
goto out_err;
- /* If the image supports layering, get the parent info */
-
- if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- goto out_err;
-
- /*
- * Don't print a warning for parent images. We can
- * tell this point because we won't know its pool
- * name yet (just its pool id).
- */
- if (rbd_dev->spec->pool_name)
- rbd_warn(rbd_dev, "WARNING: kernel layering "
- "is EXPERIMENTAL!");
- }
-
/* If the image supports fancy striping, get its parameters */
if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
if (ret < 0)
goto out_err;
}
-
- /* crypto and compression type aren't (yet) supported for v2 images */
-
- rbd_dev->header.crypt_type = 0;
- rbd_dev->header.comp_type = 0;
-
- /* Get the snapshot context, plus the header version */
-
- ret = rbd_dev_v2_snap_context(rbd_dev);
- if (ret)
- goto out_err;
-
- dout("discovered version 2 image, header name is %s\n",
- rbd_dev->header_name);
+	/* No support for crypto and compression type in format 2 images */
return 0;
out_err:
- rbd_dev->parent_overlap = 0;
- rbd_spec_put(rbd_dev->parent_spec);
- rbd_dev->parent_spec = NULL;
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
+ rbd_dev->header.features = 0;
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
if (!parent)
goto out_err;
- ret = rbd_dev_image_probe(parent);
+ ret = rbd_dev_image_probe(parent, false);
if (ret < 0)
goto out_err;
rbd_dev->parent = parent;
+ atomic_set(&rbd_dev->parent_ref, 1);
return 0;
out_err:
if (parent) {
- rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev_unparent(rbd_dev);
kfree(rbd_dev->header_name);
rbd_dev_destroy(parent);
} else {
{
int ret;
- ret = rbd_dev_mapping_set(rbd_dev);
- if (ret)
- return ret;
-
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
if (ret)
goto err_out_blkdev;
- ret = rbd_bus_add_dev(rbd_dev);
+ ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
goto err_out_disk;
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+
+ ret = rbd_bus_add_dev(rbd_dev);
+ if (ret)
+ goto err_out_mapping;
/* Everything's ready. Announce the disk to the world. */
- set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
add_disk(rbd_dev->disk);
return ret;
+err_out_mapping:
+ rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
- int ret;
-
rbd_dev_unprobe(rbd_dev);
- ret = rbd_dev_header_watch_sync(rbd_dev, 0);
- if (ret)
- rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
rbd_dev->image_format = 0;
/*
* Probe for the existence of the header object for the given rbd
- * device. For format 2 images this includes determining the image
- * id.
+ * device. If this image is the one being mapped (i.e., not a
+ * parent), initiate a watch on its header object before using that
+ * object to get detailed information about the rbd image.
*/
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
int ret;
int tmp;
if (ret)
goto err_out_format;
- ret = rbd_dev_header_watch_sync(rbd_dev, 1);
- if (ret)
- goto out_header_name;
+ if (mapping) {
+ ret = rbd_dev_header_watch_sync(rbd_dev, true);
+ if (ret)
+ goto out_header_name;
+ }
if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_probe(rbd_dev);
+ ret = rbd_dev_v1_header_info(rbd_dev);
else
- ret = rbd_dev_v2_probe(rbd_dev);
+ ret = rbd_dev_v2_header_info(rbd_dev);
if (ret)
goto err_out_watch;
goto err_out_probe;
ret = rbd_dev_probe_parent(rbd_dev);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_out_probe;
+
+ dout("discovered format %u image, header name is %s\n",
+ rbd_dev->image_format, rbd_dev->header_name);
+ return 0;
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
- tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
- if (tmp)
- rbd_warn(rbd_dev, "unable to tear down watch request\n");
+ if (mapping) {
+ tmp = rbd_dev_header_watch_sync(rbd_dev, false);
+ if (tmp)
+ rbd_warn(rbd_dev, "unable to tear down "
+ "watch request (%d)\n", tmp);
+ }
out_header_name:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
struct ceph_osd_client *osdc;
+ bool read_only;
int rc = -ENOMEM;
if (!try_module_get(THIS_MODULE))
rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
if (rc < 0)
goto err_out_module;
+ read_only = rbd_opts->read_only;
+ kfree(rbd_opts);
+ rbd_opts = NULL; /* done with this */
rbdc = rbd_get_client(ceph_opts);
if (IS_ERR(rbdc)) {
rbdc = NULL; /* rbd_dev now owns this */
spec = NULL; /* rbd_dev now owns this */
- rbd_dev->mapping.read_only = rbd_opts->read_only;
- kfree(rbd_opts);
- rbd_opts = NULL; /* done with this */
-
- rc = rbd_dev_image_probe(rbd_dev);
+ rc = rbd_dev_image_probe(rbd_dev, true);
if (rc < 0)
goto err_out_rbd_dev;
+ /* If we are mapping a snapshot it must be marked read-only */
+
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ read_only = true;
+ rbd_dev->mapping.read_only = read_only;
+
rc = rbd_dev_device_setup(rbd_dev);
if (!rc)
return count;
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- rbd_dev_clear_mapping(rbd_dev);
+ rbd_dev_mapping_clear(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
rbd_dev->major = 0;
rbd_dev_id_put(rbd_dev);
spin_unlock_irq(&rbd_dev->lock);
if (ret < 0)
goto done;
- ret = count;
rbd_bus_del_dev(rbd_dev);
+ ret = rbd_dev_header_watch_sync(rbd_dev, false);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
rbd_dev_image_release(rbd_dev);
module_put(THIS_MODULE);
+ ret = count;
done:
mutex_unlock(&ctl_mutex);
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
/* device id and bus width */
- of_property_read_u32(dev->dev.of_node, "port-number", &id);
- if (id < 0)
+ if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
id = 0;
if (of_find_property(dev->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
clk_prepare_enable(mxc_rng->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENOENT;
- goto err_region;
- }
-
mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mxc_rng->mem)) {
err = PTR_ERR(mxc_rng->mem);
return 0;
err_ioremap:
-err_region:
clk_disable_unprepare(mxc_rng->clk);
out:
dev_set_drvdata(&pdev->dev, priv);
priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!priv->mem_res) {
- ret = -ENOENT;
- goto err_ioremap;
- }
-
priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
enum bt_states state;
unsigned char seq; /* BT sequence number */
struct si_sm_io *io;
- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int write_count;
- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int read_count;
int truncated;
long timeout; /* microseconds countdown */
return ipmi_ioctl(filep, cmd, arg);
}
}
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_mutex);
+ ret = compat_ipmi_ioctl(filep, cmd, arg);
+ mutex_unlock(&ipmi_mutex);
+
+ return ret;
+}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ipmi_ioctl,
+ .compat_ioctl = unlocked_compat_ipmi_ioctl,
#endif
.open = ipmi_open,
.release = ipmi_release,
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+ entry->name = kstrdup(name, GFP_KERNEL);
if (!entry->name) {
kfree(entry);
return -ENOMEM;
}
- strcpy(entry->name, name);
file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
if (!file) {
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->dev, "Could not enable interrupts"
- ", failed get, using polled mode.\n");
+ dev_warn(smi_info->dev,
+ "Couldn't get irq info: %x.\n", msg[2]);
+ dev_warn(smi_info->dev,
+ "Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0)
- dev_warn(smi_info->dev, "Could not enable interrupts"
- ", failed set, using polled mode.\n");
- else
+ if (msg[2] != 0) {
+ dev_warn(smi_info->dev,
+ "Couldn't set irq info: %x.\n", msg[2]);
+ dev_warn(smi_info->dev,
+ "Maybe ok, but ipmi might run very slowly.\n");
+ } else
smi_info->interrupt_disabled = 0;
smi_info->si_state = SI_NORMAL;
break;
return -EFAULT;
break;
case LPGETSTATUS:
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
status = r_str(minor);
lp_release_parport (&lp_table[minor]);
+ mutex_unlock(&lp_table[minor].port_mutex);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
if (r->entropy_count / 8 < min + reserved) {
nbytes = 0;
} else {
+ int entropy_count, orig;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
/* If limited, never pull more than available */
- if (r->limit && nbytes + reserved >= r->entropy_count / 8)
- nbytes = r->entropy_count/8 - reserved;
-
- if (r->entropy_count / 8 >= nbytes + reserved)
- r->entropy_count -= nbytes*8;
- else
- r->entropy_count = reserved;
+ if (r->limit && nbytes + reserved >= entropy_count / 8)
+ nbytes = entropy_count/8 - reserved;
+
+ if (entropy_count / 8 >= nbytes + reserved) {
+ entropy_count -= nbytes*8;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ } else {
+ entropy_count = reserved;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ }
- if (r->entropy_count < random_write_wakeup_thresh)
+ if (entropy_count < random_write_wakeup_thresh)
wakeup_write = 1;
}
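The account() change above swaps a plain read-modify-write of r->entropy_count for a cmpxchg retry loop, so concurrent callers cannot lose each other's updates. As an aside, a minimal userspace sketch of the same lockless pattern, with hypothetical names and C11 compare-and-exchange in place of cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int entropy_count = 4096;	/* in bits, illustrative only */

/* Atomically debit up to 'nbits', never going below 'reserved'. */
static int debit_entropy(int nbits, int reserved)
{
	int orig, new;

	do {
		orig = atomic_load(&entropy_count);
		if (orig - nbits >= reserved)
			new = orig - nbits;
		else
			new = reserved;
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));

	return orig - new;	/* bits actually debited */
}

int main(void)
{
	printf("debited %d bits, %d left\n",
	       debit_entropy(512, 128), atomic_load(&entropy_count));
	return 0;
}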
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled && !r->last_data_init)
- nbytes += EXTRACT_SIZE;
+ if (fips_enabled) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!r->last_data_init) {
+ r->last_data_init = true;
+ spin_unlock_irqrestore(&r->lock, flags);
+ trace_extract_entropy(r->name, EXTRACT_SIZE,
+ r->entropy_count, _RET_IP_);
+ xfer_secondary_pool(r, EXTRACT_SIZE);
+ extract_buf(r, tmp);
+ spin_lock_irqsave(&r->lock, flags);
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ }
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
extract_buf(r, tmp);
if (fips_enabled) {
- unsigned long flags;
-
-
- /* prime last_data value if need be, per fips 140-2 */
- if (!r->last_data_init) {
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- r->last_data_init = true;
- nbytes -= EXTRACT_SIZE;
- spin_unlock_irqrestore(&r->lock, flags);
- extract_buf(r, tmp);
- }
-
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
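The fips_enabled path above primes last_data exactly once under the pool lock, then compares every fresh extraction against the previous block and treats a repeat as a hardware failure (the FIPS 140-2 continuous test). A rough userspace sketch of that check, with a hypothetical get_block() standing in for extract_buf():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10		/* illustrative block size */

/* Hypothetical source of raw output blocks. */
static void get_block(unsigned char buf[EXTRACT_SIZE])
{
	for (int i = 0; i < EXTRACT_SIZE; i++)
		buf[i] = rand() & 0xff;
}

int main(void)
{
	unsigned char last[EXTRACT_SIZE], cur[EXTRACT_SIZE];
	bool last_init = false;

	for (int i = 0; i < 1000; i++) {
		get_block(cur);
		if (last_init && !memcmp(cur, last, EXTRACT_SIZE)) {
			fprintf(stderr, "duplicated output, failing\n");
			return 1;
		}
		memcpy(last, cur, EXTRACT_SIZE);
		last_init = true;
	}
	printf("continuous test passed\n");
	return 0;
}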
{
int ret = -ENOMEM;
- tpk_port.port.ops = &null_ops;
mutex_init(&tpk_port.port_write_mutex);
ttyprintk_driver = tty_alloc_driver(1,
return PTR_ERR(ttyprintk_driver);
tty_port_init(&tpk_port.port);
+ tpk_port.port.ops = &null_ops;
ttyprintk_driver->driver_name = "ttyprintk";
ttyprintk_driver->name = "ttyprintk";
unsigned char reg;
unsigned char rdiv;
- if (hwdata->num > 5)
+ if (hwdata->num <= 5)
reg = si5351_msynth_params_address(hwdata->num) + 2;
else
reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
return -EINVAL;
}
drvdata->onecell.clks[n] = clk;
+
+ /* set initial clkout rate */
+ if (pdata->clkout[n].rate != 0) {
+ int ret;
+ ret = clk_set_rate(clk, pdata->clkout[n].rate);
+ if (ret != 0) {
+				dev_err(&client->dev, "Cannot set rate: %d\n",
+ ret);
+ }
+ }
}
ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
writel(divisor, cdev->div_reg);
vt8500_pmc_wait_busy();
- spin_lock_irqsave(cdev->lock, flags);
+ spin_unlock_irqrestore(cdev->lock, flags);
return 0;
}
*/
#include <linux/clk.h>
+#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/init.h>
GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+ CLK_IGNORE_UNUSED, 0),
GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
GATE(smmu_rotator, "smmu_rotator", "aclk200",
E4210_GATE_IP_IMAGE, 4, 0, 0),
GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0),
+ GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+ CLK_IGNORE_UNUSED, 0),
GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
struct clk *clk;
int i;
+ /* ac97 */
+ clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 3, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra20-ac97");
+ clks[ac97] = clk;
+
/* apbdma */
clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
0, 34, &periph_h_regs,
{uartc, pll_p, 0, 0},
{uartd, pll_p, 0, 0},
{uarte, pll_p, 0, 0},
- {usbd, clk_max, 12000000, 0},
- {usb2, clk_max, 12000000, 0},
- {usb3, clk_max, 12000000, 0},
{pll_a, clk_max, 56448000, 1},
{pll_a_out0, clk_max, 11289600, 1},
{cdev1, clk_max, 0, 1},
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < num_parents; i++) {
+ /* set main clock registers */
+ clk->reg_sel[0] = reg_sel[0];
+ clk->reg_bits[0] = reg_bits[0];
+ clk->reg_mask[0] = reg_mask[0];
+
+ /* handle clocks with more than one parent */
+ for (i = 1; i < num_parents; i++) {
clk->reg_sel[i] = reg_sel[i];
clk->reg_bits[i] = reg_bits[i];
clk->reg_mask[i] = reg_mask[i];
clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
BIT(0), 0);
clk_register_clkdev(clk, "fsmc", NULL);
- clk_register_clkdev(clk, NULL, "smsc911x");
+ clk_register_clkdev(clk, NULL, "smsc911x.0");
clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
BIT(1), 0);
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_device.h>
#define PRV_CLOCK_PARAMS 0x800
static int lpt_clk_probe(struct platform_device *pdev)
{
+ struct lpss_clk_data *drvdata;
struct clk *clk;
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
/* LPSS free running clock */
- clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
- 100000000);
+ drvdata->name = "lpss_clk";
+ clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
+ CLK_IS_ROOT, 100000000);
if (IS_ERR(clk))
return PTR_ERR(clk);
- /* Shared DMA clock */
- clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+ drvdata->clk = clk;
+ platform_set_drvdata(pdev, drvdata);
return 0;
}
choice
prompt "Default CPUFreq governor"
- default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
help
This option sets which CPUFreq governor shall be loaded at
#
config ARM_BIG_LITTLE_CPUFREQ
- tristate
- depends on ARM_CPU_TOPOLOGY
+ tristate "Generic ARM big LITTLE CPUfreq driver"
+ depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+ help
+ This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
config ARM_DT_BL_CPUFREQ
- tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
- select ARM_BIG_LITTLE_CPUFREQ
- depends on OF && HAVE_CLK
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
help
- This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
- This gets frequency tables from DT.
+	  This enables probing via DT for the Generic CPUfreq driver for ARM
+	  big.LITTLE platforms. It gets frequency tables from DT.
config ARM_EXYNOS_CPUFREQ
bool "SAMSUNG EXYNOS SoCs"
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
- depends on X86_32
+ depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
does not have any safeguards to prevent operating the CPU out of spec
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
-static int cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
static unsigned int bL_cpufreq_get(unsigned int cpu)
{
u32 cur_cluster = cpu_to_cluster(cpu);
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}
int (*init_opp_table)(struct device *cpu_dev);
};
+static inline int cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "arm_big_little.h"
-static int dt_init_opp_table(struct device *cpu_dev)
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
{
- struct device_node *np, *parent;
- int count = 0, ret;
+ struct device_node *np = NULL, *parent;
+ int count = 0;
parent = of_find_node_by_path("/cpus");
if (!parent) {
pr_err("failed to find OF /cpus\n");
- return -ENOENT;
+ return NULL;
}
for_each_child_of_node(parent, np) {
- if (count++ != cpu_dev->id)
+ if (count++ != cpu)
continue;
if (!of_get_property(np, "operating-points", NULL)) {
- ret = -ENODATA;
- } else {
- cpu_dev->of_node = np;
- ret = of_init_opp_table(cpu_dev);
+ of_node_put(np);
+ np = NULL;
}
- of_node_put(np);
- of_node_put(parent);
- return ret;
+ break;
}
- return -ENODEV;
+ of_node_put(parent);
+ return np;
}
-static int dt_get_transition_latency(struct device *cpu_dev)
+static int dt_init_opp_table(struct device *cpu_dev)
{
- struct device_node *np, *parent;
- u32 transition_latency = CPUFREQ_ETERNAL;
- int count = 0;
+ struct device_node *np;
+ int ret;
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_err("failed to find OF /cpus\n");
- return -ENOENT;
- }
+ np = get_cpu_node_with_valid_op(cpu_dev->id);
+ if (!np)
+ return -ENODATA;
- for_each_child_of_node(parent, np) {
- if (count++ != cpu_dev->id)
- continue;
+ cpu_dev->of_node = np;
+ ret = of_init_opp_table(cpu_dev);
+ of_node_put(np);
- of_property_read_u32(np, "clock-latency", &transition_latency);
- of_node_put(np);
- of_node_put(parent);
+ return ret;
+}
- return 0;
- }
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+ struct device_node *np;
+ u32 transition_latency = CPUFREQ_ETERNAL;
+
+ np = get_cpu_node_with_valid_op(cpu_dev->id);
+ if (!np)
+ return CPUFREQ_ETERNAL;
- return -ENODEV;
+ of_property_read_u32(np, "clock-latency", &transition_latency);
+ of_node_put(np);
+
+ pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+ return transition_latency;
}
static struct cpufreq_arm_bL_ops dt_bL_ops = {
.init_opp_table = dt_init_opp_table,
};
-static int generic_bL_init(void)
+static int generic_bL_probe(struct platform_device *pdev)
{
+ struct device_node *np;
+
+ np = get_cpu_node_with_valid_op(0);
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
return bL_cpufreq_register(&dt_bL_ops);
}
-module_init(generic_bL_init);
-static void generic_bL_exit(void)
+static int generic_bL_remove(struct platform_device *pdev)
{
- return bL_cpufreq_unregister(&dt_bL_ops);
+ bL_cpufreq_unregister(&dt_bL_ops);
+ return 0;
}
-module_exit(generic_bL_exit);
+
+static struct platform_driver generic_bL_platdrv = {
+ .driver = {
+ .name = "arm-bL-cpufreq-dt",
+ .owner = THIS_MODULE,
+ },
+ .probe = generic_bL_probe,
+ .remove = generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
- long freq_Hz;
+ long freq_Hz, freq_exact;
unsigned int index;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
+ freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
}
}
- ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
if (!np) {
pr_err("failed to find cpu0 node\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto out_put_parent;
}
cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+ if (IS_ERR(cpu_reg)) {
+ /*
+ * If cpu0 regulator supply node is present, but regulator is
+		 * not yet registered, we should try deferring the probe.
+ */
+ if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+ dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+ ret = -EPROBE_DEFER;
+ goto out_put_node;
+ }
+ pr_warn("failed to get cpu0 regulator: %ld\n",
+ PTR_ERR(cpu_reg));
+ cpu_reg = NULL;
+ }
+
cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
goto out_put_node;
}
- cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
- if (IS_ERR(cpu_reg)) {
- pr_warn("failed to get cpu0 regulator\n");
- cpu_reg = NULL;
- }
-
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
+out_put_parent:
+ of_node_put(parent);
return ret;
}
__func__, cpu_dev->id, cpu);
}
+ if ((cpus == 1) && (cpufreq_driver->target))
+ __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
cpufreq_cpu_put(data);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
/* end old governor */
if (data->governor) {
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ unlock_policy_rwsem_write(policy->cpu);
__cpufreq_governor(data,
CPUFREQ_GOV_POLICY_EXIT);
+ lock_policy_rwsem_write(policy->cpu);
}
/* start new governor */
data->governor = policy->governor;
if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+ if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
failed = 0;
- else
+ } else {
+ unlock_policy_rwsem_write(policy->cpu);
__cpufreq_governor(data,
CPUFREQ_GOV_POLICY_EXIT);
+ lock_policy_rwsem_write(policy->cpu);
+ }
}
if (failed) {
if (dev) {
switch (action) {
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_UP_CANCELED_FROZEN:
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
}
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/cpu.h>
#include "cpufreq_governor.h"
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
+ get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
+ put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
if (have_governor_per_policy()) {
WARN_ON(dbs_data);
} else if (dbs_data) {
+ dbs_data->usage_count++;
policy->governor_data = dbs_data;
return 0;
}
}
dbs_data->cdata = cdata;
+ dbs_data->usage_count = 1;
rc = cdata->init(dbs_data);
if (rc) {
pr_err("%s: POLICY_INIT: init() failed\n", __func__);
set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
latency * LATENCY_MULTIPLIER));
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ if ((cdata->governor == GOV_CONSERVATIVE) &&
+ (!policy->governor->initialized)) {
struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
cpufreq_register_notifier(cs_ops->notifier_block,
return 0;
case CPUFREQ_GOV_POLICY_EXIT:
- if ((policy->governor->initialized == 1) ||
- have_governor_per_policy()) {
+ if (!--dbs_data->usage_count) {
sysfs_remove_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+ (policy->governor->initialized == 1)) {
struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
cpufreq_unregister_notifier(cs_ops->notifier_block,
struct dbs_data {
struct common_dbs_data *cdata;
unsigned int min_sampling_rate;
+ int usage_count;
void *tuners;
/* dbs_mutex protects dbs_enable in governor start/stop */
tuners->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
- pr_info("%s: tuners %p\n", __func__, tuners);
mutex_init(&dbs_data->mutex);
return 0;
}
switch (action) {
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu);
break;
case CPU_DEAD:
- case CPU_DEAD_FROZEN:
+ cpufreq_stats_free_table(cpu);
+ break;
+ case CPU_UP_CANCELED_FROZEN:
+ cpufreq_stats_free_sysfs(cpu);
cpufreq_stats_free_table(cpu);
break;
}
}
struct sample {
- ktime_t start_time;
- ktime_t end_time;
int core_pct_busy;
- int pstate_pct_busy;
- u64 duration_us;
- u64 idletime_us;
u64 aperf;
u64 mperf;
int freq;
struct pstate_adjust_policy *pstate_policy;
struct pstate_data pstate;
struct _pid pid;
- struct _pid idle_pid;
int min_pstate_count;
- int idle_mode;
- ktime_t prev_sample;
- u64 prev_idle_time_us;
u64 prev_aperf;
u64 prev_mperf;
int sample_ptr;
int min_perf_pct;
int32_t max_perf;
int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
};
static struct perf_limits limits = {
.max_perf = int_tofp(1),
.min_perf_pct = 0,
.min_perf = 0,
+ .max_policy_pct = 100,
+ .max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
0);
}
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
- pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
- pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
- pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
- pid_reset(&cpu->idle_pid,
- 75,
- 50,
- cpu->pstate_policy->deadband,
- 0);
-}
-
static inline void intel_pstate_reset_all_pid(void)
{
unsigned int cpu;
if (ret != 1)
return -EINVAL;
- limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return count;
}
if (pstate == cpu->pstate.current_pstate)
return;
-#ifndef MODULE
trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
struct sample *sample)
{
u64 core_pct;
- sample->pstate_pct_busy = 100 - div64_u64(
- sample->idletime_us * 100,
- sample->duration_us);
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
- sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
- 100);
+ sample->core_pct_busy = core_pct;
}
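The reworked intel_pstate_calc_busy() above derives everything from the per-sample APERF/MPERF deltas: core_pct is 100 * aperf_delta / mperf_delta, and the reported frequency is max_pstate * core_pct * 1000 (the driver's pstate unit is 100 MHz, so this lands in kHz). A small sketch of that arithmetic with made-up deltas:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up per-sample deltas of the APERF/MPERF MSRs. */
	uint64_t aperf_delta = 180000000;	/* counts at the actual frequency */
	uint64_t mperf_delta = 240000000;	/* counts at the TSC frequency */
	int max_pstate = 24;			/* a 2.4 GHz part */

	int core_pct = (int)(aperf_delta * 100 / mperf_delta);
	int freq_khz = max_pstate * core_pct * 1000;

	printf("core busy %d%%, reported freq %d kHz\n", core_pct, freq_khz);
	return 0;	/* prints 75%% busy, 1800000 kHz */
}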
static inline void intel_pstate_sample(struct cpudata *cpu)
{
- ktime_t now;
- u64 idle_time_us;
u64 aperf, mperf;
- now = ktime_get();
- idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- /* for the first sample, don't actually record a sample, just
- * set the baseline */
- if (cpu->prev_idle_time_us > 0) {
- cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
- cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
- cpu->samples[cpu->sample_ptr].end_time = now;
- cpu->samples[cpu->sample_ptr].duration_us =
- ktime_us_delta(now, cpu->prev_sample);
- cpu->samples[cpu->sample_ptr].idletime_us =
- idle_time_us - cpu->prev_idle_time_us;
-
- cpu->samples[cpu->sample_ptr].aperf = aperf;
- cpu->samples[cpu->sample_ptr].mperf = mperf;
- cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
- cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
- intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- }
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- cpu->prev_sample = now;
- cpu->prev_idle_time_us = idle_time_us;
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
}
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
- cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
- cpu->idle_mode = 0;
-}
-
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
intel_pstate_pstate_decrease(cpu, steps);
}
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
- int busy_scaled;
- struct _pid *pid;
- int ctl = 0;
- int steps;
-
- pid = &cpu->idle_pid;
-
- busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
- ctl = pid_calc(pid, 100 - busy_scaled);
-
- steps = abs(ctl);
- if (ctl < 0)
- intel_pstate_pstate_decrease(cpu, steps);
- else
- intel_pstate_pstate_increase(cpu, steps);
-
- if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
- intel_pstate_normal_mode(cpu);
-}
-
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
intel_pstate_sample(cpu);
+ intel_pstate_adjust_busy_pstate(cpu);
- if (!cpu->idle_mode)
- intel_pstate_adjust_busy_pstate(cpu);
- else
- intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
cpu->min_pstate_count++;
if (!(cpu->min_pstate_count % 5)) {
intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
- intel_pstate_idle_mode(cpu);
}
} else
cpu->min_pstate_count = 0;
-#endif
+
intel_pstate_set_sample_time(cpu);
}
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
ICPU(0x2d, default_policy),
+ ICPU(0x3a, default_policy),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
(unsigned long)cpu;
cpu->timer.expires = jiffies + HZ/100;
intel_pstate_busy_pid_reset(cpu);
- intel_pstate_idle_pid_reset(cpu);
intel_pstate_sample(cpu);
intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
- limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
- limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return 0;
pr_info("Intel P-state driver initializing.\n");
- all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+ all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
rc = cpufreq_register_driver(&intel_pstate_driver);
if (rc)
priv.dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Cannot get memory resource\n");
- return -ENODEV;
- }
priv.base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
#include <linux/platform_device.h>
#include <asm/clock.h>
+#include <asm/idle.h>
#include <asm/mach-loongson/loongson.h>
LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+ local_irq_enable();
}
static int __init cpufreq_init(void)
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
+ .cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
.cra_init = nx_crypto_ctx_aes_cbc_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_alignmask = 0xf,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
- nbytes -= AES_BLOCK_SIZE;
+ nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
* 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
* 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+ if (len + sctx->count < SHA256_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count, data, len);
sctx->count += len;
goto out;
atomic_inc(&(nx_ctx->stats->sha256_ops));
/* copy the leftover back into the state struct */
- memcpy(sctx->buf, data + len - leftover, leftover);
+ if (leftover)
+ memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count = leftover;
csbcpb->cpb.sha256.message_bit_length += (u64)
struct nx_sg *in_sg, *out_sg;
int rc;
+
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
atomic_inc(&(nx_ctx->stats->sha256_ops));
- atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+ atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
&(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
* 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
* 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+ if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count[0], data, len);
sctx->count[0] += len;
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
/* copy the leftover back into the state struct */
- memcpy(sctx->buf, data + len - leftover, leftover);
+ if (leftover)
+ memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count[0] = leftover;
spbc_bits = csbcpb->cpb.sha512.spbc * 8;
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+ atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
&(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
{
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
- struct blkcipher_walk walk;
- int rc;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- if (rc)
- goto out;
if (iv)
- memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+ memcpy(iv, desc->info, AES_BLOCK_SIZE);
- while (walk.nbytes) {
- nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
-
- rc = blkcipher_walk_done(desc, &walk, 0);
- if (rc)
- break;
- }
-
- if (walk.nbytes) {
- nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
-
- rc = 0;
- }
+ nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+ nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
- return rc;
+
+ return 0;
}
/**
if (rc)
goto out;
+ nx_driver.of.status = NX_OKAY;
+
rc = crypto_register_alg(&nx_ecb_aes_alg);
if (rc)
goto out;
if (rc)
goto out_unreg_s512;
- nx_driver.of.status = NX_OKAY;
-
goto out;
out_unreg_s512:
* Based on of-dma.c
*
* Copyright (C) 2013, Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
static LIST_HEAD(acpi_dma_list);
static DEFINE_MUTEX(acpi_dma_lock);
+/**
+ * acpi_dma_parse_resource_group - match device and parse resource group
+ * @grp: CSRT resource group
+ * @adev: ACPI device to match with
+ * @adma: struct acpi_dma of the given DMA controller
+ *
+ * Returns 1 on success, 0 when no information is available, or an appropriate
+ * errno value on error.
+ *
+ * In order to match a device from the DSDT table to the corresponding CSRT
+ * device, we use the MMIO address and IRQ.
+ */
+static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+ struct acpi_device *adev, struct acpi_dma *adma)
+{
+ const struct acpi_csrt_shared_info *si;
+ struct list_head resource_list;
+ struct resource_list_entry *rentry;
+ resource_size_t mem = 0, irq = 0;
+ u32 vendor_id;
+ int ret;
+
+ if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (ret <= 0)
+ return 0;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ if (resource_type(&rentry->res) == IORESOURCE_MEM)
+ mem = rentry->res.start;
+ else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
+ irq = rentry->res.start;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /* Consider initial zero values as resource not found */
+ if (mem == 0 && irq == 0)
+ return 0;
+
+ si = (const struct acpi_csrt_shared_info *)&grp[1];
+
+ /* Match device by MMIO and IRQ */
+ if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+ return 0;
+
+ vendor_id = le32_to_cpu(grp->vendor_id);
+ dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
+ (char *)&vendor_id, grp->device_id, grp->revision);
+
+ /* Check if the request line range is available */
+ if (si->base_request_line == 0 && si->num_handshake_signals == 0)
+ return 0;
+
+ adma->base_request_line = si->base_request_line;
+ adma->end_request_line = si->base_request_line +
+ si->num_handshake_signals - 1;
+
+ dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
+ adma->base_request_line, adma->end_request_line);
+
+ return 1;
+}
+
+/**
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
+ * @adev: ACPI device to match with
+ * @adma: struct acpi_dma of the given DMA controller
+ *
+ * CSRT, or Core System Resources Table, is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular, DMA controllers might be described
+ * here.
+ *
+ * We use this table to obtain the request line range of the specific DMA
+ * controller, to be used later.
+ *
+ */
+static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
+{
+ struct acpi_csrt_group *grp, *end;
+ struct acpi_table_csrt *csrt;
+ acpi_status status;
+ int ret;
+
+ status = acpi_get_table(ACPI_SIG_CSRT, 0,
+ (struct acpi_table_header **)&csrt);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ dev_warn(&adev->dev, "failed to get the CSRT table\n");
+ return;
+ }
+
+ grp = (struct acpi_csrt_group *)(csrt + 1);
+ end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
+ while (grp < end) {
+ ret = acpi_dma_parse_resource_group(grp, adev, adma);
+ if (ret < 0) {
+ dev_warn(&adev->dev,
+ "error in parsing resource group\n");
+ return;
+ }
+
+ grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+ }
+}
+
/**
* acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
* @dev: struct device of DMA controller
adma->acpi_dma_xlate = acpi_dma_xlate;
adma->data = data;
+ acpi_dma_parse_csrt(adev, adma);
+
/* Now queue acpi_dma controller structure in list */
mutex_lock(&acpi_dma_lock);
list_add_tail(&adma->dma_controllers, &acpi_dma_list);
}
EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+/**
+ * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
+ * @adma: struct acpi_dma of DMA controller
+ * @dma_spec: dma specifier to update
+ *
+ * Returns 0 if no information is available, -1 on mismatch, and 1 otherwise.
+ *
+ * According to the ACPI 5.0 Specification, Table 6-170 "Fixed DMA Resource
+ * Descriptor":
+ * DMA Request Line bits is a platform-relative number uniquely
+ * identifying the request line assigned. Request line-to-Controller
+ * mapping is done in a controller-specific OS driver.
+ * That's why we can safely adjust slave_id when the appropriate controller is
+ * found.
+ */
+static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
+ struct acpi_dma_spec *dma_spec)
+{
+ /* Set link to the DMA controller device */
+ dma_spec->dev = adma->dev;
+
+ /* Check if the request line range is available */
+ if (adma->base_request_line == 0 && adma->end_request_line == 0)
+ return 0;
+
+ /* Check if slave_id falls to the range */
+ if (dma_spec->slave_id < adma->base_request_line ||
+ dma_spec->slave_id > adma->end_request_line)
+ return -1;
+
+ /*
+ * Here we adjust slave_id. It should be a relative number to the base
+ * request line.
+ */
+ dma_spec->slave_id -= adma->base_request_line;
+
+ return 1;
+}
+
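
The helper above rebases slave_id onto the controller's request line window. A self-contained sketch of the same mapping; the window values (0x10..0x1f) and names here are purely hypothetical:

#include <stdio.h>

/* hypothetical stand-in for the request line window taken from a CSRT group */
struct req_window { unsigned int base, end; };

/* returns -1 when slave_id belongs to another controller, otherwise the rebased line */
static int map_slave_id(const struct req_window *w, unsigned int slave_id)
{
	if (w->base == 0 && w->end == 0)
		return slave_id;		/* no range information: pass through */
	if (slave_id < w->base || slave_id > w->end)
		return -1;			/* outside this controller's window */
	return slave_id - w->base;		/* make it relative to the base line */
}

int main(void)
{
	struct req_window w = { .base = 0x10, .end = 0x1f };

	printf("0x12 -> %d\n", map_slave_id(&w, 0x12));	/* 2: rebased */
	printf("0x30 -> %d\n", map_slave_id(&w, 0x30));	/* -1: different controller */
	return 0;
}
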
struct acpi_dma_parser_data {
struct acpi_dma_spec dma_spec;
size_t index;
struct acpi_device *adev;
struct acpi_dma *adma;
struct dma_chan *chan = NULL;
+ int found;
/* Check if the device was enumerated by ACPI */
if (!dev || !ACPI_HANDLE(dev))
mutex_lock(&acpi_dma_lock);
list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
- dma_spec->dev = adma->dev;
+ /*
+ * We are not going to call translation function if slave_id
+ * doesn't fall to the request range.
+ */
+ found = acpi_dma_update_dma_spec(adma, dma_spec);
+ if (found < 0)
+ continue;
chan = adma->acpi_dma_xlate(dma_spec, adma);
- if (chan)
+ /*
+ * Try to get a channel only from the DMA controller that
+ * matches the slave_id. See acpi_dma_update_dma_spec()
+ * description for the details.
+ */
+ if (found > 0 || chan)
break;
}
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait,
- done.done || kthread_should_stop(),
+ wait_event_freezable_timeout(done_wait, done.done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
struct dmatest_params *params = &info->params;
- int ret;
/* Stop any running test first */
__stop_threaded_test(info);
memcpy(params, &info->dbgfs_params, sizeof(*params));
/* Run test with new parameters */
- ret = __run_threaded_test(info);
- if (ret) {
- __stop_threaded_test(info);
- pr_err("dmatest: Can't run test\n");
+ return __run_threaded_test(info);
+}
+
+static bool __is_threaded_test_run(struct dmatest_info *info)
+{
+ struct dmatest_chan *dtc;
+
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
}
- return ret;
+ return false;
}
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
{
struct dmatest_info *info = file->private_data;
char buf[3];
- struct dmatest_chan *dtc;
- bool alive = false;
mutex_lock(&info->lock);
- list_for_each_entry(dtc, &info->channels, node) {
- struct dmatest_thread *thread;
-
- list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done) {
- alive = true;
- break;
- }
- }
- }
- if (alive) {
+ if (__is_threaded_test_run(info)) {
buf[0] = 'Y';
} else {
__stop_threaded_test(info);
if (strtobool(buf, &bv) == 0) {
mutex_lock(&info->lock);
- ret = __restart_threaded_test(info, bv);
+
+ if (__is_threaded_test_run(info))
+ ret = -EBUSY;
+ else
+ ret = __restart_threaded_test(info, bv);
+
mutex_unlock(&info->lock);
}
return;
}
- if (d40_queue_start(d40c) == NULL)
+ if (d40_queue_start(d40c) == NULL) {
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
platform_set_drvdata(pdev, tdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "No mem resource for DMA\n");
- return -EINVAL;
- }
-
tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
amd64_inject_word_show, amd64_inject_word_store);
static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_write, S_IWUSR,
NULL, amd64_inject_write_store);
-static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_read, S_IWUSR,
NULL, amd64_inject_read_store);
struct efivar_entry *entry;
int err;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
-
/* Add new sysfs entries */
while (1) {
- memset(entry, 0, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
err = efivar_init(efivar_update_sysfs_entry, entry,
true, false, &efivar_sysfs_list);
config GPIO_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
- depends on SPI_MASTER || I2C
+ depends on (SPI_MASTER && !I2C) || I2C
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
I/O expanders.
resource_size_t start, len;
struct lnw_gpio *lnw;
u32 gpio_base;
+ u32 irq_base;
int retval;
int ngpio = id->driver_data;
retval = -EFAULT;
goto err_ioremap;
}
+ irq_base = *(u32 *)base;
gpio_base = *((u32 *)base + 1);
/* release the IO mapping, since we already get the info from bar1 */
iounmap(base);
goto err_ioremap;
}
- lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
- &lnw_gpio_irq_ops, lnw);
- if (!lnw->domain) {
- retval = -ENOMEM;
- goto err_ioremap;
- }
-
lnw->reg_base = base;
lnw->chip.label = dev_name(&pdev->dev);
lnw->chip.request = lnw_gpio_request;
lnw->chip.ngpio = ngpio;
lnw->chip.can_sleep = 0;
lnw->pdev = pdev;
+
+ lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+ &lnw_gpio_irq_ops, lnw);
+ if (!lnw->domain) {
+ retval = -ENOMEM;
+ goto err_ioremap;
+ }
+
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
err_gpiochip_add:
while (--i >= 0) {
chip--;
- ret = gpiochip_remove(&chip->gpio);
- if (ret)
+ if (gpiochip_remove(&chip->gpio))
dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
}
kfree(chip_save);
* per-CPU registers */
if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Cannot get memory resource\n");
- return -ENODEV;
- }
-
mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
res);
if (IS_ERR(mvchip->percpu_membase))
err = bgpio_init(&port->bgc, &pdev->dev, 4,
port->base + PINCTRL_DIN(port),
- port->base + PINCTRL_DOUT(port), NULL,
+ port->base + PINCTRL_DOUT(port) + MXS_SET,
+ port->base + PINCTRL_DOUT(port) + MXS_CLR,
port->base + PINCTRL_DOE(port), NULL, 0);
if (err)
goto out_irqdesc_free;
bool is_mpuio;
bool dbck_flag;
bool loses_context;
+ bool context_valid;
int stride;
u32 width;
int context_loss_count;
bank->loses_context = true;
} else {
bank->loses_context = pdata->loses_context;
+
+ if (bank->loses_context)
+ bank->get_context_loss_count =
+ pdata->get_context_loss_count;
}
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
- if (bank->loses_context)
- bank->get_context_loss_count = pdata->get_context_loss_count;
-
pm_runtime_put(bank->dev);
list_add_tail(&bank->node, &omap_gpio_list);
return 0;
}
+static void omap_gpio_init_context(struct gpio_bank *p);
+
static int omap_gpio_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int c;
spin_lock_irqsave(&bank->lock, flags);
+
+ /*
+ * On the first resume during the probe, the context has not
+ * been initialised and so initialise it now. Also initialise
+ * the context loss count.
+ */
+ if (bank->loses_context && !bank->context_valid) {
+ omap_gpio_init_context(bank);
+
+ if (bank->get_context_loss_count)
+ bank->context_loss_count =
+ bank->get_context_loss_count(bank->dev);
+ }
+
_gpio_dbck_enable(bank);
/*
}
#if defined(CONFIG_PM_RUNTIME)
+static void omap_gpio_init_context(struct gpio_bank *p)
+{
+ struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
+
+ p->context.ctrl = __raw_readl(base + regs->ctrl);
+ p->context.oe = __raw_readl(base + regs->direction);
+ p->context.wake_en = __raw_readl(base + regs->wkup_en);
+ p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
+ p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
+ p->context.risingdetect = __raw_readl(base + regs->risingdetect);
+ p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
+ p->context.irqenable1 = __raw_readl(base + regs->irqenable);
+ p->context.irqenable2 = __raw_readl(base + regs->irqenable2);
+
+ if (regs->set_dataout && p->regs->clr_dataout)
+ p->context.dataout = __raw_readl(base + regs->set_dataout);
+ else
+ p->context.dataout = __raw_readl(base + regs->dataout);
+
+ p->context_valid = true;
+}
+
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
__raw_writel(bank->context.wake_en,
#else
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
+static void omap_gpio_init_context(struct gpio_bank *p) {}
#endif
static const struct dev_pm_ops gpio_pm_ops = {
err_request_irq:
irq_free_descs(irq_base, gpio_pins[chip->ioh]);
- ret = gpiochip_remove(&chip->gpio);
- if (ret)
+ if (gpiochip_remove(&chip->gpio))
dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
err_gpiochip_add:
return 0;
err_sch_gpio_resume:
- err = gpiochip_remove(&sch_gpio_core);
- if (err)
- dev_err(&pdev->dev, "%s failed, %d\n",
- "gpiochip_remove()", err);
+ if (gpiochip_remove(&sch_gpio_core))
+ dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
err_sch_gpio_core:
release_region(res->start, resource_size(res));
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(regs))
return PTR_ERR(regs);
return ret;
err_gpiob:
- ret = gpiochip_remove(&vb_gpio->gpioa);
+ if (gpiochip_remove(&vb_gpio->gpioa))
+ dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
err_gpioa:
return ret;
{
struct drm_crtc *crtc;
+ /* Locking is currently fubar in the panic handler. */
+ if (oops_in_progress)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
else
return "unknown";
}
+EXPORT_SYMBOL(drm_get_connector_status_name);
/**
* drm_mode_object_get - allocate a new modeset identifier
connector->helper_private;
int count = 0;
int mode_flags = 0;
+ bool verbose_prune = true;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
+ verbose_prune = false;
goto prune;
}
}
prune:
- drm_mode_prune_invalid(dev, &connector->modes, true);
+ drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
if (list_empty(&connector->modes))
return 0;
continue;
connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
- connector->base.id,
- drm_get_connector_name(connector),
- old_status, connector->status);
- if (old_status != connector->status)
+ if (old_status != connector->status) {
+ const char *old, *new;
+
+ old = drm_get_connector_status_name(old_status);
+ new = drm_get_connector_status_name(connector->status);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+ "status updated from %s to %s\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old, new);
+
changed = true;
+ }
}
mutex_unlock(&dev->mode_config.mutex);
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
- old_status, connector->status);
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
if (old_status != connector->status)
changed = true;
}
struct drm_file *file_priv);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- const struct drm_ioctl_desc *ioctl;
+ const struct drm_ioctl_desc *ioctl = NULL;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->minor->device),
- file_priv->authenticated);
-
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
} else
goto err_i1;
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+ file_priv->authenticated, ioctl->name);
+
/* Do not trust userspace, use our own definition */
func = ioctl->func;
/* is there a local override? */
}
err_i1:
+ if (!ioctl)
+ DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+ file_priv->authenticated, cmd, nr);
+
if (kdata != stack_kdata)
kfree(kdata);
atomic_dec(&dev->ioctl_count);
struct i2c_adapter *adap,
const struct i2c_board_info *info)
{
- char modalias[sizeof(I2C_MODULE_PREFIX)
- + I2C_NAME_SIZE];
struct module *module = NULL;
struct i2c_client *client;
struct drm_i2c_encoder_driver *encoder_drv;
int err = 0;
- snprintf(modalias, sizeof(modalias),
- "%s%s", I2C_MODULE_PREFIX, info->type);
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(adap, info);
if (!client) {
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
- /* vblank is not initialized (IRQ not installed ?) */
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
/*
{
unsigned long irqflags;
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
- struct drm_mm_node *entry;
- unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
- hole_start = drm_mm_hole_node_start(&mm->head_node);
- hole_end = drm_mm_hole_node_end(&mm->head_node);
- hole_size = hole_end - hole_start;
- if (hole_size)
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);
- total_free += hole_size;
+ return hole_size;
+ }
+
+ return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+ struct drm_mm_node *entry;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_dump_hole(m, &mm->head_node);
drm_mm_for_each_node(entry, mm) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
- if (entry->hole_follows) {
- hole_start = drm_mm_hole_node_start(entry);
- hole_end = drm_mm_hole_node_end(entry);
- hole_size = hole_end - hole_start;
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
- hole_start, hole_end, hole_size);
- total_free += hole_size;
- }
+ total_free += drm_mm_dump_hole(m, entry);
}
total = total_free + total_used;
was_digit = false;
} else
goto done;
+ break;
case '0' ... '9':
was_digit = true;
break;
unsigned int pipe;
unsigned int dpms;
enum exynos_crtc_mode mode;
+ wait_queue_head_t pending_flip_queue;
+ atomic_t pending_flip;
};
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
return;
}
+ if (mode > DRM_MODE_DPMS_ON) {
+ /* wait for the completion of page flip. */
+ wait_event(exynos_crtc->pending_flip_queue,
+ atomic_read(&exynos_crtc->pending_flip) == 0);
+ drm_vblank_off(crtc->dev, exynos_crtc->pipe);
+ }
+
exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
exynos_crtc->dpms = mode;
}
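
The DPMS-off path above now waits for any outstanding page flip before the pipe is shut down, while the finish-pageflip side clears the flag and wakes the waiter. A minimal userspace sketch of the same wait/wake pattern, using pthreads rather than the kernel waitqueue API; the delay and names are invented for the example:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* hypothetical stand-in for exynos_crtc->pending_flip / pending_flip_queue */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flip_done = PTHREAD_COND_INITIALIZER;
static int pending_flip;

static void *flip_complete(void *arg)
{
	usleep(10000);			/* pretend the hardware finished the flip */
	pthread_mutex_lock(&lock);
	pending_flip = 0;		/* finish-pageflip: clear the flag and wake waiters */
	pthread_cond_broadcast(&flip_done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pending_flip = 1;		/* page flip queued: mark it as outstanding */
	pthread_create(&t, NULL, flip_complete, NULL);

	/* DPMS off: do not power the pipe down until the flip has completed */
	pthread_mutex_lock(&lock);
	while (pending_flip)
		pthread_cond_wait(&flip_done, &lock);
	pthread_mutex_unlock(&lock);

	printf("safe to power the CRTC down\n");
	pthread_join(t, NULL);
	return 0;
}
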
ret = drm_vblank_get(dev, exynos_crtc->pipe);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter\n");
- list_del(&event->base.link);
goto out;
}
spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ atomic_set(&exynos_crtc->pending_flip, 1);
spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
exynos_crtc->pipe = nr;
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&exynos_crtc->pending_flip_queue);
+ atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
if (!exynos_crtc->plane) {
kfree(exynos_crtc);
{
struct exynos_drm_private *dev_priv = dev->dev_private;
struct drm_pending_vblank_event *e, *t;
- struct timeval now;
+ struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
unsigned long flags;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (crtc != e->pipe)
continue;
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ list_del(&e->base.link);
+ drm_send_vblank_event(dev, -1, e);
drm_vblank_put(dev, crtc);
+ atomic_set(&exynos_crtc->pending_flip, 0);
+ wake_up(&exynos_crtc->pending_flip_queue);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
&exynos_gem_obj->base);
- if (IS_ERR_OR_NULL(helper->fb)) {
+ if (IS_ERR(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
*
*/
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
}
ctx->irq = res->start;
- ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
IRQF_ONESHOT, "drm_fimc", ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
ret = fimc_setup_clocks(ctx);
if (ret < 0)
- goto err_free_irq;
+ return ret;
ippdrv = &ctx->ippdrv;
ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
goto err_pm_dis;
}
- dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+ dev_info(dev, "drm fimc registered successfully.\n");
return 0;
pm_runtime_disable(dev);
err_put_clk:
fimc_put_clocks(ctx);
-err_free_irq:
- free_irq(ctx->irq, ctx);
return ret;
}
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
- free_irq(ctx->irq, ctx);
-
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (pdev->dev.of_node) {
+ if (dev->of_node) {
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("memory allocation for pdata failed\n");
return ret;
}
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev->platform_data;
if (!pdata) {
DRM_ERROR("no platform data specified\n");
return -EINVAL;
return -EINVAL;
}
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_ioremap_resource(&pdev->dev, res);
+ ctx->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
ctx->irq = res->start;
- ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler,
+ ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
struct exynos_drm_subdrv *subdrv;
int ret;
- g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
+ g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
if (!g2d) {
dev_err(dev, "failed to allocate driver data\n");
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g2d->regs = devm_ioremap_resource(&pdev->dev, res);
+ g2d->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(g2d->regs)) {
ret = PTR_ERR(g2d->regs);
goto err_put_clk;
goto err_put_clk;
}
- ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+ ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
"drm_g2d", g2d);
if (ret < 0) {
dev_err(dev, "irq request failed\n");
}
ctx->irq = res->start;
- ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
IRQF_ONESHOT, "drm_gsc", ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
ret = gsc_init_prop_list(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to init property list.\n");
- goto err_get_irq;
+ return ret;
}
DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
goto err_ippdrv_register;
}
- dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+ dev_info(dev, "drm gsc registered successfully.\n");
return 0;
err_ippdrv_register:
- devm_kfree(dev, ippdrv->prop_list);
pm_runtime_disable(dev);
-err_get_irq:
- free_irq(ctx->irq, ctx);
return ret;
}
struct gsc_context *ctx = get_gsc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
- devm_kfree(dev, ippdrv->prop_list);
exynos_drm_ippdrv_unregister(ippdrv);
mutex_destroy(&ctx->lock);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
- free_irq(ctx->irq, ctx);
-
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_LOG_KMS("failed to alloc common hdmi context.\n");
return -ENOMEM;
/* find ipp driver using idr */
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
ipp_id);
- if (IS_ERR_OR_NULL(ippdrv)) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n", ipp_id);
return ippdrv;
}
DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
ippdrv = ipp_find_drv_by_handle(prop_id);
- if (IS_ERR_OR_NULL(ippdrv)) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("failed to get ipp driver.\n");
return -EINVAL;
}
/* find ipp driver using ipp id */
ippdrv = ipp_find_driver(ctx, property);
- if (IS_ERR_OR_NULL(ippdrv)) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("failed to get ipp driver.\n");
return -EINVAL;
}
c_node->state = IPP_STATE_IDLE;
c_node->start_work = ipp_create_cmd_work();
- if (IS_ERR_OR_NULL(c_node->start_work)) {
+ if (IS_ERR(c_node->start_work)) {
DRM_ERROR("failed to create start work.\n");
goto err_clear;
}
c_node->stop_work = ipp_create_cmd_work();
- if (IS_ERR_OR_NULL(c_node->stop_work)) {
+ if (IS_ERR(c_node->stop_work)) {
DRM_ERROR("failed to create stop work.\n");
goto err_free_start;
}
c_node->event_work = ipp_create_event_work();
- if (IS_ERR_OR_NULL(c_node->event_work)) {
+ if (IS_ERR(c_node->event_work)) {
DRM_ERROR("failed to create event work.\n");
goto err_free_stop;
}
DRM_DEBUG_KMS("%s\n", __func__);
ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
- if (IS_ERR_OR_NULL(ippdrv)) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("failed to get ipp driver.\n");
return -EFAULT;
}
struct exynos_drm_subdrv *subdrv;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
goto err_cmd_workq;
}
- dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+ dev_info(dev, "drm ipp registered successfully.\n");
return 0;
return rot->irq;
}
- ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
- IRQF_ONESHOT, "drm_rotator", rot);
+ ret = devm_request_threaded_irq(dev, rot->irq, NULL,
+ rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
return ret;
rot->clock = devm_clk_get(dev, "rotator");
if (IS_ERR(rot->clock)) {
dev_err(dev, "failed to get clock\n");
- ret = PTR_ERR(rot->clock);
- goto err_clk_get;
+ return PTR_ERR(rot->clock);
}
pm_runtime_enable(dev);
return 0;
err_ippdrv_register:
- devm_kfree(dev, ippdrv->prop_list);
pm_runtime_disable(dev);
-err_clk_get:
- free_irq(rot->irq, rot);
return ret;
}
struct rot_context *rot = dev_get_drvdata(dev);
struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
- devm_kfree(dev, ippdrv->prop_list);
exynos_drm_ippdrv_unregister(ippdrv);
pm_runtime_disable(dev);
- free_irq(rot->irq, rot);
-
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0)
DRM_INFO("failed to create connection sysfs.\n");
DRM_DEBUG_KMS("[%d]\n", __LINE__);
- if (pdev->dev.of_node) {
+ if (dev->of_node) {
pdata = drm_hdmi_dt_parse_pdata(dev);
if (IS_ERR(pdata)) {
DRM_ERROR("failed to parse dt\n");
return PTR_ERR(pdata);
}
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev->platform_data;
}
if (!pdata) {
return -EINVAL;
}
- drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+ drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
GFP_KERNEL);
if (!drm_hdmi_ctx) {
DRM_ERROR("failed to allocate common hdmi context.\n");
return -ENOMEM;
}
- hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context),
+ hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
GFP_KERNEL);
if (!hdata) {
DRM_ERROR("out of memory\n");
if (dev->of_node) {
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
- pdev->dev.of_node);
+ dev->of_node);
if (match == NULL)
return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("failed to find registers\n");
- return -ENOENT;
- }
-
- hdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ hdata->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hdata->regs))
return PTR_ERR(hdata->regs);
- ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
return ret;
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- ret = request_threaded_irq(hdata->irq, NULL,
+ ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi", drm_hdmi_ctx);
static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
- struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
pm_runtime_disable(dev);
- free_irq(hdata->irq, hdata);
-
-
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
/* DDC i2c driver */
return -ENXIO;
}
- mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
+ mixer_res->mixer_regs = devm_ioremap(dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
- ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
+ ret = devm_request_irq(dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
return -ENXIO;
}
- mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
+ mixer_res->vp_regs = devm_ioremap(dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
dev_info(dev, "probe start\n");
- drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+ drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
GFP_KERNEL);
if (!drm_hdmi_ctx) {
DRM_ERROR("failed to allocate common hdmi context.\n");
return -ENOMEM;
}
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_ERROR("failed to alloc mixer context.\n");
return -ENOMEM;
if (dev->of_node) {
const struct of_device_id *match;
match = of_match_node(of_match_ptr(mixer_match_types),
- pdev->dev.of_node);
+ dev->of_node);
drv = (struct mixer_drv_data *)match->data;
} else {
drv = (struct mixer_drv_data *)
platform_get_device_id(pdev)->driver_data;
}
- ctx->dev = &pdev->dev;
+ ctx->dev = dev;
ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+ INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+ INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+ INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+ INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+ INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+ INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+ INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
return (void __user *)(uintptr_t)address;
}
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+ unsigned long j = timespec_to_jiffies(value);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
#endif
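
The new helpers add one jiffy and clamp to MAX_JIFFY_OFFSET so a converted timeout cannot elapse before the full interval (the current tick may already be partially over when the wait starts). A userspace approximation of that behaviour, with HZ fixed at 250 and the constants defined locally just for the example:

#include <stdio.h>

#define HZ                 250UL               /* assumed tick rate for the example */
#define MAX_JIFFY_OFFSET   ((~0UL >> 1) - 1)   /* stand-in for the kernel constant */

/* userspace stand-in for msecs_to_jiffies(): round the interval up to whole ticks */
static unsigned long msecs_to_jiffies_approx(unsigned int m)
{
	return (m * HZ + 999) / 1000;
}

static unsigned long msecs_to_jiffies_timeout(unsigned int m)
{
	/* one extra tick so the wait can never end before the full interval,
	 * clamped so the result never exceeds the maximum timeout */
	unsigned long j = msecs_to_jiffies_approx(m) + 1;

	return j < MAX_JIFFY_OFFSET ? j : MAX_JIFFY_OFFSET;
}

int main(void)
{
	printf("10 ms -> %lu ticks, with guard -> %lu ticks\n",
	       msecs_to_jiffies_approx(10), msecs_to_jiffies_timeout(10));
	return 0;
}
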
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error))
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
- /* GPU is already declared terminally dead, give up. */
- if (i915_terminally_wedged(error))
- return -EIO;
-
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
wait_forever = false;
}
- timeout_jiffies = timespec_to_jiffies(&wait_time);
+ timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
if (timeout) {
struct timespec sleep_time = timespec_sub(now, before);
*timeout = timespec_sub(*timeout, sleep_time);
+ if (!timespec_valid(timeout)) /* i.e. negative time remains */
+ set_normalized_timespec(timeout, 0, 0);
}
switch (end) {
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
- if (timeout)
- set_normalized_timespec(timeout, 0, 0);
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
- if (timeout) {
- WARN_ON(!timespec_valid(timeout));
+ if (timeout)
args->timeout_ns = timespec_to_ns(timeout);
- }
return ret;
out:
return snb_gmch_ctl << 25; /* 32 MB units */
}
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
- static const int stolen_decoder[] = {
- 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
- snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
- return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
- *stolen = gen7_get_stolen_size(snb_gmch_ctl);
- else
- *stolen = gen6_get_stolen_size(snb_gmch_ctl);
-
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
/* For Modern GENs the PTEs and register space are split in the BAR */
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f
-#define IVB_GMCH_GMS_SHIFT 4
-#define IVB_GMCH_GMS_MASK 0xf
/* PCI config space */
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
+ if (port != PORT_A)
+ intel_dp_stop_link_train(intel_dp);
}
}
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (port == PORT_A)
+ intel_dp_stop_link_train(intel_dp);
+
ironlake_edp_backlight_on(intel_dp);
}
memset(&pipe_config, 0, sizeof(pipe_config));
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
+
+ /* hw state is inconsistent with the pipe A quirk */
+ if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ active = crtc->active;
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
}
}
+static bool
+is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
+ int num_connectors)
+{
+ int i;
+
+ for (i = 0; i < num_connectors; i++)
+ if (connectors[i].encoder &&
+ connectors[i].encoder->crtc == crtc &&
+ connectors[i].dpms != DRM_MODE_DPMS_ON)
+ return true;
+
+ return false;
+}
+
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
struct intel_set_config *config)
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
+ if (set->connectors != NULL &&
+ is_crtc_connector_off(set->crtc, *set->connectors,
+ set->num_connectors)) {
+ config->mode_changed = true;
+ } else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
} else if (set->fb->pixel_format !=
set->crtc->fb->pixel_format) {
config->mode_changed = true;
- } else
+ } else {
config->fb_changed = true;
+ }
}
if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
- if (ret) {
- DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
- set->crtc->base.id, ret);
- goto fail;
- }
} else if (config->fb_changed) {
intel_crtc_wait_for_pending_flips(set->crtc);
set->x, set->y, set->fb);
}
- intel_set_config_free(config);
-
- return 0;
-
+ if (ret) {
+ DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
fail:
- intel_set_config_restore_state(dev, config);
+ intel_set_config_restore_state(dev, config);
- /* Try to restore the config */
- if (config->mode_changed &&
- intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
- DRM_ERROR("failed to restore config after modeset failure\n");
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+ }
out_config:
intel_set_config_free(config);
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
- msecs_to_jiffies(10));
+ msecs_to_jiffies_timeout(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+ if (is_edp(intel_dp) && dev_priv->edp.bpp)
+ bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(target_clock, bpp);
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->pipe_bpp = bpp;
pipe_config->pixel_target_clock = target_clock;
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
target_clock, adjusted_mode->clock,
&pipe_config->dp_m_n);
- /*
- * XXX: We have a strange regression where using the vbt edp bpp value
- * for the link bw computation results in black screens, the panel only
- * works when we do the computation at the usual 24bpp (but still
- * requires us to use 18bpp). Until that's fully debugged, stay
- * bug-for-bug compatible with the old code.
- */
- if (is_edp(intel_dp) && dev_priv->edp.bpp) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
- bpp, dev_priv->edp.bpp);
- bpp = min_t(int, bpp, dev_priv->edp.bpp);
- }
- pipe_config->pipe_bpp = bpp;
-
return true;
}
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
}
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
- uint32_t temp;
if (HAS_DDI(dev)) {
- temp = I915_READ(DP_TP_CTL(port));
+ uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
-
- if (port != PORT_A) {
- temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), temp);
-
- if (wait_for((I915_READ(DP_TP_STATUS(port)) &
- DP_TP_STATUS_IDLE_DONE), 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- }
-
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
return true;
}
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ uint32_t val;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ /*
+ * On PORT_A we can have only eDP in SST mode. There the only reason
+ * we need to set idle transmission mode is to work around a HW issue
+ * where we enable the pipe while not in idle link-training mode.
+ * In this case there is a requirement to wait for a minimum number of
+ * idle patterns to be sent.
+ */
+ if (port == PORT_A)
+ return;
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+ 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
++tries;
}
+ intel_dp_set_idle_link_train(intel_dp);
+
+ intel_dp->DP = DP;
+
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
- intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+ intel_dp_set_link_train(intel_dp, intel_dp->DP,
+ DP_TRAINING_PATTERN_DISABLE);
}
static void
drm_get_encoder_name(&intel_encoder->base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
}
}
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
void intel_fbdev_set_suspend(struct drm_device *dev, int state)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct fb_info *info;
+
+ if (!ifbdev)
return;
- fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+ info = ifbdev->helper.fbdev;
+
+ /* On resume from hibernation: If the object is shmemfs backed, it has
+ * been restored from swap. If the object is stolen, however, it will be
+ * full of whatever garbage was left in there.
+ */
+ if (!state && ifbdev->ifb.obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ fb_set_suspend(info, state);
}
MODULE_LICENSE("GPL and additional rights");
* need to wake up periodically and check that ourselves. */
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
- for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+ for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
- ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
I915_WRITE(GMBUS4 + reg_offset, 0);
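Both GMBUS waits above now go through msecs_to_jiffies_timeout(), which (judging from the "+ 1" it replaces in the first hunk) converts milliseconds to jiffies and pads the result by one tick, so the caller is guaranteed at least the requested wall-clock time even if the current tick is about to expire. Below is a minimal userspace sketch of why that pad matters, assuming HZ = 100; it is an illustration of the rounding argument, not the kernel implementation.

    /* Minimal sketch: why a "+1 jiffy" pad guarantees a minimum timeout.
     * Assumes HZ = 100 (10 ms per tick); not the kernel implementation. */
    #include <stdio.h>

    #define HZ 100

    static unsigned long msecs_to_jiffies_naive(unsigned int ms)
    {
        /* round up to whole ticks, like msecs_to_jiffies() */
        return (ms * HZ + 999) / 1000;
    }

    static unsigned long msecs_to_jiffies_padded(unsigned int ms)
    {
        /* one extra tick: the current tick may expire almost immediately,
         * so N ticks only guarantee (N - 1) full tick periods */
        return msecs_to_jiffies_naive(ms) + 1;
    }

    int main(void)
    {
        unsigned int ms[] = { 10, 50 };

        for (int i = 0; i < 2; i++)
            printf("%u ms -> naive %lu ticks (min %lu ms), padded %lu ticks (min %lu ms)\n",
                   ms[i],
                   msecs_to_jiffies_naive(ms[i]),
                   (msecs_to_jiffies_naive(ms[i]) - 1) * (1000 / HZ),
                   msecs_to_jiffies_padded(ms[i]),
                   (msecs_to_jiffies_padded(ms[i]) - 1) * (1000 / HZ));
        return 0;
    }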
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .ident = "Hewlett-Packard HP t5740",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
},
},
{
vlv_update_drain_latency(dev);
- if (g4x_compute_wm0(dev, 0,
+ if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
- enabled |= 1;
+ enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, 1,
+ if (g4x_compute_wm0(dev, PIPE_B,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
- enabled |= 2;
+ enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
int plane_sr, cursor_sr;
unsigned int enabled = 0;
- if (g4x_compute_wm0(dev, 0,
+ if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
- enabled |= 1;
+ enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, 1,
+ if (g4x_compute_wm0(dev, PIPE_B,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
- enabled |= 2;
+ enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
unsigned int enabled;
enabled = 0;
- if (g4x_compute_wm0(dev, 0,
+ if (g4x_compute_wm0(dev, PIPE_A,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 1;
+ enabled |= 1 << PIPE_A;
}
- if (g4x_compute_wm0(dev, 1,
+ if (g4x_compute_wm0(dev, PIPE_B,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 2;
+ enabled |= 1 << PIPE_B;
}
/*
unsigned int enabled;
enabled = 0;
- if (g4x_compute_wm0(dev, 0,
+ if (g4x_compute_wm0(dev, PIPE_A,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 1;
+ enabled |= 1 << PIPE_A;
}
- if (g4x_compute_wm0(dev, 1,
+ if (g4x_compute_wm0(dev, PIPE_B,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 2;
+ enabled |= 1 << PIPE_B;
}
/*
unsigned int enabled;
enabled = 0;
- if (g4x_compute_wm0(dev, 0,
+ if (g4x_compute_wm0(dev, PIPE_A,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 1;
+ enabled |= 1 << PIPE_A;
}
- if (g4x_compute_wm0(dev, 1,
+ if (g4x_compute_wm0(dev, PIPE_B,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 2;
+ enabled |= 1 << PIPE_B;
}
- if (g4x_compute_wm0(dev, 2,
+ if (g4x_compute_wm0(dev, PIPE_C,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled |= 3;
+ enabled |= 1 << PIPE_C;
}
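The watermark hunks above switch the "enabled" tracking from magic numbers to per-pipe bits; notably, the old pipe C case or-ed in 3, which marks pipes A and B rather than pipe C. A small self-contained sketch of the bitmask arithmetic follows; the enum values mirror the usual PIPE_A = 0, PIPE_B = 1, PIPE_C = 2 convention and are an assumption here.

    /* Sketch of the per-pipe "enabled" bitmask; enum values are assumed. */
    #include <stdio.h>

    enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };

    int main(void)
    {
        unsigned int enabled = 0;

        enabled |= 1 << PIPE_A;        /* 0x1 */
        enabled |= 1 << PIPE_C;        /* 0x4 */
        printf("new scheme: enabled = 0x%x\n", enabled);  /* 0x5: pipes A and C */

        unsigned int old = 0;
        old |= 1;                      /* old pipe A case */
        old |= 3;                      /* old "pipe C" case: really sets bits for A and B */
        printf("old scheme: enabled = 0x%x (pipe C bit never set)\n", old);
        return 0;
    }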
/*
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
if (list_empty(&connector->probed_modes) == false)
goto end;
static inline void mga_wait_vsync(struct mga_device *mdev)
{
- unsigned int count = 0;
+ unsigned long timeout = jiffies + HZ/10;
unsigned int status = 0;
do {
status = RREG32(MGAREG_Status);
- count++;
- } while ((status & 0x08) && (count < 250000));
- count = 0;
+ } while ((status & 0x08) && time_before(jiffies, timeout));
+ timeout = jiffies + HZ/10;
status = 0;
do {
status = RREG32(MGAREG_Status);
- count++;
- } while (!(status & 0x08) && (count < 250000));
+ } while (!(status & 0x08) && time_before(jiffies, timeout));
}
static inline void mga_wait_busy(struct mga_device *mdev)
{
- unsigned int count = 0;
+ unsigned long timeout = jiffies + HZ;
unsigned int status = 0;
do {
status = RREG8(MGAREG_Status + 2);
- count++;
- } while ((status & 0x01) && (count < 500000));
+ } while ((status & 0x01) && time_before(jiffies, timeout));
}
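mga_wait_vsync() and mga_wait_busy() above now bound their register polling by wall-clock deadlines (jiffies + HZ/10 and jiffies + HZ) instead of fixed iteration counts, whose duration depended on CPU and bus speed. The sketch below shows the same deadline-polling pattern in userspace, using CLOCK_MONOTONIC in place of jiffies; the 100 ms budget and the fake status read are assumptions for illustration only.

    /* Deadline-based polling sketch; clock source and status stub are assumed. */
    #include <stdio.h>
    #include <time.h>

    static long long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    static unsigned int read_status(void)
    {
        return 0x08;    /* stand-in for RREG32(MGAREG_Status) */
    }

    int main(void)
    {
        long long deadline = now_ms() + 100;    /* roughly a HZ/10 budget */
        unsigned int status;

        do {
            status = read_status();
        } while ((status & 0x08) && now_ms() < deadline);

        printf("stopped with status 0x%02x, %s\n", status,
               now_ms() < deadline ? "condition met" : "deadline hit");
        return 0;
    }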
/*
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
/* select PLL Set C */
tmp = RREG8(MGAREG_MEM_MISC_READ);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
WREG8(DAC_INDEX, MGA1064_VREF_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~0x04;
- WREG_DAC(MGA1064_VREF_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(50);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
/* reset dotclock rate bit */
WREG8(MGAREG_SEQ_INDEX, 1);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
vcount = RREG8(MGAREG_VCOUNT);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= 0x3 << 2;
WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
tmp = RREG8(DAC_DATA);
- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+ WREG8(DAC_DATA, tmp & ~0x40);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
tmp = RREG8(DAC_DATA);
- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+ WREG8(DAC_DATA, tmp | 0x40);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= (0x3 << 2);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
return 0;
}
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= 0x3 << 2;
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
vcount = RREG8(MGAREG_VCOUNT);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= (0x3<<2) | 0xc0;
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
-
+/*
+ This is how the framebuffer base address is stored in g200 cards:
+ * Assume @offset is the gpu_addr variable of the framebuffer object
+ * Then addr is the number of _pixels_ (not bytes) from the start of
+ VRAM to the first pixel we want to display. (divided by 2 for 32bit
+ framebuffers)
+ * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+ addr<20> -> CRTCEXT0<6>
+ addr<19-16> -> CRTCEXT0<3-0>
+ addr<15-8> -> CRTCC<7-0>
+ addr<7-0> -> CRTCD<7-0>
+ CRTCEXT0 has to be programmed last to trigger an update and make the
+ new addr variable take effect.
+ */
void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct mga_device *mdev = crtc->dev->dev_private;
u32 addr;
int count;
+ u8 crtcext0;
while (RREG8(0x1fda) & 0x08);
while (!(RREG8(0x1fda) & 0x08));
count = RREG8(MGAREG_VCOUNT) + 2;
while (RREG8(MGAREG_VCOUNT) < count);
- addr = offset >> 2;
+ WREG8(MGAREG_CRTCEXT_INDEX, 0);
+ crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+ crtcext0 &= 0xB0;
+ addr = offset / 8;
+ /* Can't store addresses any higher than that...
+ but we also don't have more than 16MB of memory, so it should be fine. */
+ WARN_ON(addr > 0x1fffff);
+ crtcext0 |= (!!(addr & (1<<20)))<<6;
WREG_CRT(0x0d, (u8)(addr & 0xff));
WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
- WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+ WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
}
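The comment block above describes how the start address is scattered across CRTCD, CRTCC and CRTCEXT0, with CRTCEXT0 written last to latch the update. Below is a standalone sketch of just that bit-splitting arithmetic; the offset value is an arbitrary example and the register writes are replaced by prints.

    /* Bit-split sketch for the g200 start address; the example offset is arbitrary. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t offset = 0x00654321;          /* byte offset into VRAM (example) */
        uint32_t addr = offset / 8;            /* units used by the CRTC registers */
        uint8_t crtcd   =  addr        & 0xff; /* addr<7-0>   -> CRTCD<7-0>    */
        uint8_t crtcc   = (addr >> 8)  & 0xff; /* addr<15-8>  -> CRTCC<7-0>    */
        uint8_t ext0_lo = (addr >> 16) & 0x0f; /* addr<19-16> -> CRTCEXT0<3-0> */
        uint8_t ext0_hi = (!!(addr & (1u << 20))) << 6; /* addr<20> -> CRTCEXT0<6> */

        printf("addr=0x%06x CRTCD=0x%02x CRTCC=0x%02x CRTCEXT0=0x%02x (written last)\n",
               addr, crtcd, crtcc, ext0_lo | ext0_hi);
        return 0;
    }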
for (i = 0; i < sizeof(dacvalue); i++) {
- if ((i <= 0x03) ||
- (i == 0x07) ||
- (i == 0x0b) ||
- (i == 0x0f) ||
- ((i >= 0x13) && (i <= 0x17)) ||
+ if ((i <= 0x17) ||
(i == 0x1b) ||
(i == 0x1c) ||
((i >= 0x1f) && (i <= 0x29)) ||
else
hi_pri_lvl = 5;
- WREG8(0x1fde, 0x06);
- WREG8(0x1fdf, hi_pri_lvl);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
} else {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
if (mdev->reg_1e24 >= 0x01)
- WREG8(0x1fdf, 0x03);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x03);
else
- WREG8(0x1fdf, 0x04);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x04);
}
}
return 0;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
{
const u32 doff = (or * 0x800);
int load = -EINVAL;
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
- udelay(9500);
+ mdelay(9);
+ udelay(500);
nv_wr32(priv, 0x61a00c + doff, 0x80000000);
load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
return load;
}
nv_wr32(priv, 0x616510 + hoff, 0x00000000);
nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
/* ??? */
nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
* FIFO channel objects
******************************************************************************/
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+static void
+nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
nv_wr32(priv, 0x002500, 0x00000101);
}
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nv50_fifo_playlist_update_locked(priv);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
static int
nv50_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *object)
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
- nv50_fifo_playlist_update(priv);
+ nv50_fifo_playlist_update_locked(priv);
nv_wr32(priv, 0x003200, 0x00000001);
nv_wr32(priv, 0x003250, 0x00000001);
struct nouveau_gpuobj *cur;
int i, p;
+ mutex_lock(&nv_subdev(priv)->mutex);
cur = priv->playlist[priv->cur_playlist];
priv->cur_playlist = !priv->cur_playlist;
nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
nv_error(priv, "playlist update failed\n");
+ mutex_unlock(&nv_subdev(priv)->mutex);
}
static int
struct nvc0_fifo_priv *priv = (void *)object->engine;
struct nvc0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
+ u32 mask, engine;
nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
nvc0_fifo_playlist_update(priv);
+ mask = nv_rd32(priv, 0x0025a4);
+ for (engine = 0; mask && engine < 16; engine++) {
+ if (!(mask & (1 << engine)))
+ continue;
+ nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
+ mask &= ~(1 << engine);
+ }
nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
u32 match = (engine << 16) | 0x00000001;
int i, p;
+ mutex_lock(&nv_subdev(priv)->mutex);
cur = engn->playlist[engn->cur_playlist];
if (unlikely(cur == NULL)) {
int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
0x8000, 0x1000, 0, &cur);
if (ret) {
+ mutex_unlock(&nv_subdev(priv)->mutex);
nv_error(priv, "playlist alloc failed\n");
return;
}
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
nv_error(priv, "playlist %d update timeout\n", engine);
+ mutex_unlock(&nv_subdev(priv)->mutex);
}
static int
#define NV50_DISP_DAC_PWR_STATE 0x00000040
#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
-#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD 0x00020100
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
#define NV50_DISP_PIOR_MTHD 0x00030000
trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
init->offset += 13;
- data = init_rd32(init, addr) & mask;
- data |= ((data + add) & ~mask);
+ data = init_rd32(init, addr);
+ data = (data & mask) | ((data + add) & ~mask);
init_wr32(init, addr, data);
}
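The ZM_MASK_ADD fix above matters because the old code added to a value that had already been masked, throwing away the bits outside the mask before the read-modify-write. A tiny sketch contrasting the two computations on an example register value; the value, mask and addend are made up.

    /* Old vs. new ZM_MASK_ADD arithmetic; the sample values are made up. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t reg = 0x12345678, mask = 0xffff0000, add = 0x00000010;

        /* old: the bits outside the mask are lost before the add */
        uint32_t old = reg & mask;
        old |= (old + add) & ~mask;

        /* new: keep the masked bits, add only into the unmasked part */
        uint32_t new = (reg & mask) | ((reg + add) & ~mask);

        printf("reg=0x%08x old=0x%08x new=0x%08x\n", reg, old, new);
        return 0;
    }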
struct nvc0_ltcg_priv {
struct nouveau_ltcg base;
u32 part_nr;
- u32 part_mask;
u32 subp_nr;
struct nouveau_mm tags;
u32 num_tags;
/* wait until it's finished with clearing */
for (p = 0; p < priv->part_nr; ++p) {
- if (!(priv->part_mask & (1 << p)))
- continue;
for (i = 0; i < priv->subp_nr; ++i)
nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
}
int ret;
nv_wr32(priv, 0x17e8d8, priv->part_nr);
+ if (nv_device(pfb)->card_type >= NV_E0)
+ nv_wr32(priv, 0x17e000, priv->part_nr);
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram.size >> 17) / 4;
{
struct nvc0_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
- int ret;
+ u32 parts, mask;
+ int ret, i;
ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->part_nr = nv_rd32(priv, 0x022438);
- priv->part_mask = nv_rd32(priv, 0x022554);
-
+ parts = nv_rd32(priv, 0x022438);
+ mask = nv_rd32(priv, 0x022554);
+ for (i = 0; i < parts; i++) {
+ if (!(mask & (1 << i)))
+ priv->part_nr++;
+ }
priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
}
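The constructor change above derives part_nr by counting partitions whose bit is clear in the register at 0x022554, instead of carrying both a raw count and a mask around. The loop treats that register as a mask of disabled partitions, which is an assumption to keep in mind when reading the sketch below; the sample values are invented.

    /* Counting enabled LTC partitions from a disable mask; sample values invented. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t parts = 8;        /* as read from 0x022438 (example) */
        uint32_t mask  = 0x24;     /* as read from 0x022554: bits 2 and 5 disabled */
        uint32_t part_nr = 0;

        for (uint32_t i = 0; i < parts; i++)
            if (!(mask & (1u << i)))
                part_nr++;

        printf("%u of %u partitions usable\n", part_nr, parts);  /* 6 of 8 */
        return 0;
    }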
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
- if (s->event) {
- struct drm_pending_vblank_event *e = s->event;
- struct timeval now;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (s->event)
+ drm_send_vblank_event(dev, -1, s->event);
list_del(&s->head);
if (ps)
NV_INFO(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+ NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+ if (drm->cechan) {
+ ret = nouveau_channel_idle(drm->cechan);
+ if (ret)
+ return ret;
+ }
+
+ if (drm->channel) {
+ ret = nouveau_channel_idle(drm->channel);
+ if (ret)
+ return ret;
+ }
+
+ NV_INFO(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm))
return -ENOMEM;
}
- NV_INFO(drm, "suspending client object trees...\n");
list_for_each_entry(cli, &drm->clients, head) {
ret = nouveau_client_fini(&cli->base, true);
if (ret)
goto fail_client;
}
+ NV_INFO(drm, "suspending kernel object tree...\n");
ret = nouveau_client_fini(&drm->client.base, true);
if (ret)
goto fail_client;
nouveau_agp_reset(drm);
- NV_INFO(drm, "resuming client object trees...\n");
+ NV_INFO(drm, "resuming kernel object tree...\n");
nouveau_client_init(&drm->client.base);
nouveau_agp_init(drm);
+ NV_INFO(drm, "resuming client object trees...\n");
+ if (drm->fence && nouveau_fence(drm)->resume)
+ nouveau_fence(drm)->resume(drm);
+
list_for_each_entry(cli, &drm->clients, head) {
nouveau_client_init(&cli->base);
}
- if (drm->fence && nouveau_fence(drm)->resume)
- nouveau_fence(drm)->resume(drm);
-
nouveau_run_vbios_init(dev);
nouveau_pm_resume(dev);
{
struct nv50_disp *disp = nv50_disp(encoder->dev);
int ret, or = nouveau_encoder(encoder)->or;
- u32 load = 0;
+ u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (load == 0)
+ load = 340;
ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
if (ret || load != 7)
* mgr->id.) Eventually this will be replaced w/ something
* more common-panel-framework-y
*/
- struct omap_overlay_manager mgr;
+ struct omap_overlay_manager *mgr;
struct omap_video_timings timings;
bool enabled;
* job of sequencing the setup of the video pipe in the proper order
*/
+/* ovl-mgr-id -> crtc */
+static struct omap_crtc *omap_crtcs[8];
+
/* we can probably ignore these until we support command-mode panels: */
+static int omap_crtc_connect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ if (mgr->output)
+ return -EINVAL;
+
+ if ((mgr->supported_outputs & dst->id) == 0)
+ return -EINVAL;
+
+ dst->manager = mgr;
+ mgr->output = dst;
+
+ return 0;
+}
+
+static void omap_crtc_disconnect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ mgr->output->manager = NULL;
+ mgr->output = NULL;
+}
+
static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
{
}
static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
const struct omap_video_timings *timings)
{
- struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
DBG("%s", omap_crtc->name);
omap_crtc->timings = *timings;
omap_crtc->full_update = true;
static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
const struct dss_lcd_mgr_config *config)
{
- struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
DBG("%s", omap_crtc->name);
dispc_mgr_set_lcd_config(omap_crtc->channel, config);
}
}
static const struct dss_mgr_ops mgr_ops = {
+ .connect = omap_crtc_connect,
+ .disconnect = omap_crtc_disconnect,
.start_update = omap_crtc_start_update,
.enable = omap_crtc_enable,
.disable = omap_crtc_disable,
} else {
if (encoder) {
omap_encoder_set_enabled(encoder, false);
- omap_encoder_update(encoder, &omap_crtc->mgr,
+ omap_encoder_update(encoder, omap_crtc->mgr,
&omap_crtc->timings);
omap_encoder_set_enabled(encoder, true);
omap_crtc->full_update = false;
[OMAP_DSS_CHANNEL_LCD2] = "lcd2",
};
+void omap_crtc_pre_init(void)
+{
+ dss_install_mgr_ops(&mgr_ops);
+}
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id)
omap_irq_register(dev, &omap_crtc->error_irq);
/* temporary: */
- omap_crtc->mgr.id = channel;
-
- dss_install_mgr_ops(&mgr_ops);
+ omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
/* TODO: fix hard-coded setup.. add properties! */
info = &omap_crtc->info;
omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+ omap_crtcs[channel] = omap_crtc;
+
return crtc;
fail:
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DPI:
- if (!strcmp(dssdev->name, "dvi"))
- return DRM_MODE_CONNECTOR_DVID;
- /* fallthrough */
+ case OMAP_DISPLAY_TYPE_DVI:
+ return DRM_MODE_CONNECTOR_DVID;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
+ int r;
+
+ omap_crtc_pre_init();
drm_mode_config_init(dev);
struct drm_connector *connector;
struct drm_encoder *encoder;
enum omap_channel channel;
+ struct omap_overlay_manager *mgr;
if (!dssdev->driver) {
dev_warn(dev->dev, "%s has no driver.. skipping it\n",
continue;
}
+ r = dssdev->driver->connect(dssdev);
+ if (r) {
+ dev_err(dev->dev, "could not connect display: %s\n",
+ dssdev->name);
+ continue;
+ }
+
encoder = omap_encoder_init(dev, dssdev);
if (!encoder) {
* other possible channels to which the encoder can connect are
* not considered.
*/
- channel = dssdev->output->dispc_channel;
+ mgr = omapdss_find_mgr_from_display(dssdev);
+ channel = mgr->id;
/*
* if this channel hasn't already been taken by a previously
* allocated crtc, we create a new crtc for it
struct drm_encoder *encoder = priv->encoders[i];
struct omap_dss_device *dssdev =
omap_encoder_get_dssdev(encoder);
+ struct omap_dss_device *output;
+
+ output = omapdss_find_output_from_display(dssdev);
/* figure out which crtc's we can connect the encoder to: */
encoder->possible_crtcs = 0;
supported_outputs =
dss_feat_get_supported_outputs(crtc_channel);
- if (supported_outputs & dssdev->output->id)
+ if (supported_outputs & output->id)
encoder->possible_crtcs |= (1 << id);
}
+
+ omap_dss_put_device(output);
}
DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
static int pdev_probe(struct platform_device *device)
{
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
DBG("%s", device->name);
return drm_platform_init(&omap_drm_driver, device);
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply);
+void omap_crtc_pre_init(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id);
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
select DRM_KMS_HELPER
select DRM_TTM
help
return 0;
}
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
int irq_num;
long addr = qdev->io_base + port;
mutex_lock(&qdev->async_io_mutex);
irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
if (qdev->last_sent_io_cmd > irq_num) {
- ret = wait_event_interruptible(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num);
- if (ret)
+ if (intr)
+ ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ else
+ ret = wait_event_timeout(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ /* 0 is timeout, just bail; the "hw" has gone away */
+ if (ret <= 0)
goto out;
irq_num = atomic_read(&qdev->irq_received_io_cmd);
}
outb(val, addr);
qdev->last_sent_io_cmd = irq_num + 1;
- ret = wait_event_interruptible(qdev->io_cmd_event,
- atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+ if (intr)
+ ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ else
+ ret = wait_event_timeout(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
+ if (ret > 0)
+ ret = 0;
mutex_unlock(&qdev->async_io_mutex);
return ret;
}
int ret;
restart:
- ret = wait_for_io_cmd_user(qdev, val, port);
+ ret = wait_for_io_cmd_user(qdev, val, port, false);
if (ret == -ERESTARTSYS)
goto restart;
}
mutex_lock(&qdev->update_area_mutex);
qdev->ram_header->update_area = *area;
qdev->ram_header->update_surface = surface_id;
- ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+ ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
mutex_unlock(&qdev->update_area_mutex);
return ret;
}
int inc = 1;
qobj = gem_to_qxl_bo(qxl_fb->obj);
- if (qxl_fb != qdev->active_user_framebuffer) {
- DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
- __func__, qxl_fb, qdev->active_user_framebuffer);
- }
+ /* if we aren't primary surface ignore this */
+ if (!qobj->is_primary)
+ return 0;
+
if (!num_clips) {
num_clips = 1;
clips = &norect;
mode->hdisplay,
mode->vdisplay);
}
- qdev->mode_set = true;
return 0;
}
{
struct drm_gem_object *obj;
struct qxl_framebuffer *qxl_fb;
- struct qxl_device *qdev = dev->dev_private;
int ret;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
return NULL;
}
- if (qdev->active_user_framebuffer) {
- DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
- __func__,
- qdev->active_user_framebuffer, qxl_fb);
- }
- qdev->active_user_framebuffer = qxl_fb;
-
return &qxl_fb->base;
}
struct qxl_gem gem;
struct qxl_mode_info mode_info;
- /*
- * last created framebuffer with fb_create
- * only used by debugfs dumbppm
- */
- struct qxl_framebuffer *active_user_framebuffer;
-
struct fb_info *fbdev_info;
struct qxl_framebuffer *fbdev_qfb;
void *ram_physical;
struct qxl_ring *cursor_ring;
struct qxl_ram_header *ram_header;
- bool mode_set;
bool primary_created;
struct qxl_bo *cmd_bo;
int release_type;
struct drm_qxl_command *commands =
- (struct drm_qxl_command *)execbuffer->commands;
+ (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
sizeof(user_cmd)))
for (i = 0 ; i < user_cmd.relocs_num; ++i) {
if (DRM_COPY_FROM_USER(&reloc,
- &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+ &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
sizeof(reloc))) {
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
goto out;
if (!qobj->pin_count) {
+ qxl_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
- DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
- (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+ DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+ (unsigned long long)qdev->vram_base,
+ (unsigned long long)pci_resource_end(pdev, 0),
(int)pci_resource_len(pdev, 0) / 1024 / 1024,
(int)pci_resource_len(pdev, 0) / 1024,
- (void *)qdev->surfaceram_base,
- (void *)pci_resource_end(pdev, 1),
+ (unsigned long long)qdev->surfaceram_base,
+ (unsigned long long)pci_resource_end(pdev, 1),
(int)qdev->surfaceram_size / 1024 / 1024,
(int)qdev->surfaceram_size / 1024);
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- radeon_crtc->in_mode_set = true;
-
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_DISABLE);
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
- radeon_crtc->in_mode_set = false;
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
- save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ if (!ASIC_IS_NODCE(rdev)) {
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- /* disable VGA render */
- WREG32(VGA_RENDER_CONTROL, 0);
+ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ }
/* blank the display controllers */
for (i = 0; i < rdev->num_crtc; i++) {
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
}
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ }
/* unlock regs and wait for update */
for (i = 0; i < rdev->num_crtc; i++) {
}
}
}
- /* Unlock vga access */
- WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ }
}
void evergreen_mc_program(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r700_vram_gtt_location(rdev, &rdev->mc);
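The CONFIG_MEMSIZE fix above avoids a 32-bit overflow: the register reports VRAM in megabytes, and multiplying a u32 by 1024 * 1024 wraps once the board has 4 GB or more; promoting the multiplication with 1024ULL keeps the result in 64 bits. A quick standalone check of that arithmetic, where the 4096 MB figure is just an example.

    /* 32-bit overflow in the MB-to-bytes conversion; 4096 MB is an example size. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t memsize_mb = 4096;  /* pretend CONFIG_MEMSIZE read */
        uint32_t wrong = memsize_mb * 1024 * 1024;       /* wraps to 0 */
        uint64_t right = memsize_mb * 1024ULL * 1024ULL; /* 4 GiB */

        printf("u32 math: %u bytes, u64 math: %llu bytes\n",
               wrong, (unsigned long long)right);
        return 0;
    }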
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
- u32 link_width_cntl, speed_cntl, mask;
- int ret;
+ u32 link_width_cntl, speed_cntl;
if (radeon_pcie_gen2 == 0)
return;
if (ASIC_IS_X2(rdev))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- u32 base_rate = 48000;
+ u32 base_rate = 24000;
if (!dig || !dig->afmt)
return;
- /* XXX: properly calculate this */
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
- WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
}
}
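With the change above, the DTO reference becomes 24000 kHz and both registers get the same factor of 100, so PHASE/MODULE encodes exactly [24 MHz / pixel clock], as the surrounding comment describes. A worked example of that ratio, assuming a 148500 kHz (1080p) pixel clock; the register names here are kept only as labels.

    /* DCCG audio DTO ratio example; the 148500 kHz pixel clock is an assumption. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base_rate = 24000;       /* kHz, i.e. the 24 MHz reference */
        uint32_t clock = 148500;          /* kHz, 1080p@60 pixel clock */
        uint32_t phase = base_rate * 100; /* DCCG_AUDIO_DTO0_PHASE */
        uint32_t module = clock * 100;    /* DCCG_AUDIO_DTO0_MODULE */

        printf("PHASE=%u MODULE=%u ratio=%.6f (24 MHz / pixel clock = %.6f)\n",
               phase, module, (double)phase / module, 24000.0 / clock);
        return 0;
    }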
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for (i = 0; i < nr; ++i) {
- if (DRM_COPY_FROM_USER_UNCHECKED
+ if (DRM_COPY_FROM_USER
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return -EFAULT;
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
if (r) {
return r;
}
- r = radeon_irq_kms_init(rdev);
- if (r) {
- return r;
- }
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
return -1;
}
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0028FC_MC_DATA);
+ WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ S_0028F8_MC_IND_WR_EN(1));
+ WREG32(R_0028FC_MC_DATA, v);
+ WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
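The new rs780_mc_rreg()/rs780_mc_wreg() helpers above use the usual index/data pair: the target MC register number goes into MC_INDEX (with a write-enable bit for stores), the payload moves through MC_DATA, and the index is parked again afterwards. Below is a self-contained sketch of that access pattern against a fake register file; the offsets and bit layout are illustrative only, not the real RS780 map.

    /* Index/data indirect register access sketch; the "hardware" is a fake array. */
    #include <stdio.h>
    #include <stdint.h>

    #define MC_IND_WR_EN (1u << 9)   /* illustrative write-enable bit */

    static uint32_t mc_regs[256];    /* fake indirect register space */
    static uint32_t mc_index;

    static void wreg_index(uint32_t v) { mc_index = v; }

    static void wreg_data(uint32_t v)
    {
        if (mc_index & MC_IND_WR_EN)
            mc_regs[mc_index & 0xff] = v;
    }

    static uint32_t rreg_data(void) { return mc_regs[mc_index & 0xff]; }

    static void mc_wreg(uint32_t reg, uint32_t v)
    {
        wreg_index((reg & 0xff) | MC_IND_WR_EN);
        wreg_data(v);
        wreg_index(0);               /* park the index again */
    }

    static uint32_t mc_rreg(uint32_t reg)
    {
        wreg_index(reg & 0xff);
        return rreg_data();
    }

    int main(void)
    {
        mc_wreg(0x12, 0xdeadbeef);
        printf("MC[0x12] = 0x%08x\n", mc_rreg(0x12));
        return 0;
    }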
static void r600_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
{
u32 tmp;
int chansize, numchan;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+ * memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+ }
}
+
radeon_update_bandwidth_info(rdev);
return 0;
}
}
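The RS780/RS880 fast-FB path above rebuilds the host-side aperture address from two MC registers: the low 32 bits come from K8_FB_LOCATION and the upper bits from the K8_ADDR_EXT field; on 32-bit non-PAE kernels the mapping is only used if the whole visible VRAM window still fits below 4 GB. A small sketch of the address composition and that bound check; the register values below are invented.

    /* Composing the K8 direct-mapping base from high/low parts; values invented. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t h_addr = 0x01;               /* K8_ADDR_EXT field (example)      */
        uint32_t l_addr = 0xd0000000;         /* K8_FB_LOCATION (example)         */
        uint64_t visible_vram = 256ULL << 20; /* 256 MiB visible window (example) */

        uint64_t k8_addr = ((uint64_t)h_addr << 32) | l_addr;

        printf("direct mapping base = 0x%llx\n", (unsigned long long)k8_addr);
        if (k8_addr + visible_vram < 0x100000000ULL)
            printf("fits below 4 GiB, usable even without PAE\n");
        else
            printf("above 4 GiB, skip on 32-bit non-PAE kernels\n");
        return 0;
    }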
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
- u32 mask;
- int ret;
if (radeon_pcie_gen2 == 0)
return;
if (rdev->family <= CHIP_R600)
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 base_rate = 48000;
+ u32 base_rate = 24000;
if (!dig || !dig->afmt)
return;
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
- /* XXX: properly calculate this */
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
/* according to the reg specs, this should be DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
- WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
- AUDIO_DTO_MODULE(clock * 100));
+ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+ AUDIO_DTO_MODULE(clock / 10));
}
}
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
+#define R_000011_K8_FB_LOCATION 0x11
+#define R_000012_MC_MISC_UMA_CNTL 0x12
+#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX 0x28F8
+#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
+#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA 0x28FC
#define R_008020_GRBM_SOFT_RESET 0x8020
#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool audio_enabled;
+ bool has_uvd;
struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
/*
* BIOS helpers.
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ rdev->mc_rreg = &rs780_mc_rreg;
+ rdev->mc_wreg = &rs780_mc_wreg;
+ }
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
else
rdev->num_crtc = 2;
+ rdev->has_uvd = false;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
case CHIP_RV635:
case CHIP_RV670:
rdev->asic = &r600_asic;
+ if (rdev->family == CHIP_R600)
+ rdev->has_uvd = false;
+ else
+ rdev->has_uvd = true;
break;
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
+ rdev->has_uvd = true;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
+ rdev->has_uvd = true;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
else
rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
+ rdev->has_uvd = true;
break;
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
rdev->asic = &sumo_asic;
+ rdev->has_uvd = true;
break;
case CHIP_BARTS:
case CHIP_TURKS:
else
rdev->num_crtc = 6;
rdev->asic = &btc_asic;
+ rdev->has_uvd = true;
break;
case CHIP_CAYMAN:
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
+ rdev->has_uvd = true;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
+ rdev->has_uvd = true;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
+ case CHIP_HAINAN:
rdev->asic = &si_asic;
/* set num crtcs */
- if (rdev->family == CHIP_OLAND)
+ if (rdev->family == CHIP_HAINAN)
+ rdev->num_crtc = 0;
+ else if (rdev->family == CHIP_OLAND)
rdev->num_crtc = 2;
else
rdev->num_crtc = 6;
+ if (rdev->family == CHIP_HAINAN)
+ rdev->has_uvd = false;
+ else
+ rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
/* enable the rom */
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
- /* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_VGA_RENDER_CONTROL,
- (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ }
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = radeon_read_bios(rdev);
/* restore regs */
WREG32(R600_BUS_CNTL, bus_cntl);
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ }
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
}
"PITCAIRN",
"VERDE",
"OLAND",
+ "HAINAN",
"LAST",
};
{
uint32_t reg;
+ /* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
- rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+ (rdev->family < CHIP_R600))
return false;
+ if (ASIC_IS_NODCE(rdev))
+ goto check_memsize;
+
/* first check CRTCs */
- if (ASIC_IS_DCE41(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (reg & EVERGREEN_CRTC_MASTER_EN)
- return true;
- } else if (ASIC_IS_DCE4(rdev)) {
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
} else if (ASIC_IS_AVIVO(rdev)) {
}
}
+check_memsize:
/* then check MEM_SIZE, in case the crtcs are off */
if (rdev->family >= CHIP_R600)
reg = RREG32(R600_CONFIG_MEMSIZE);
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
struct radeon_unpin_work *work;
- struct drm_pending_vblank_event *e;
- struct timeval now;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
radeon_crtc->unpin_work = NULL;
/* wakeup userspace */
- if (work->event) {
- e = work->event;
- e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (work->event)
+ drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
#endif
int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
static int __init radeon_init(void)
{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && radeon_modeset == -1) {
+ DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+ radeon_modeset = 0;
+ }
+#endif
+ /* set to modesetting by default if not nomodeset */
+ if (radeon_modeset == -1)
+ radeon_modeset = 1;
+
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
CHIP_PITCAIRN,
CHIP_VERDE,
CHIP_OLAND,
+ CHIP_HAINAN,
CHIP_LAST,
};
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
- radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
- radeon_crtc->in_mode_set = false;
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
- bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
- (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+ (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
rs400_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
rs600_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
- r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
chip_id = 0x0100000b;
break;
case CHIP_SUMO:
- chip_id = 0x0100000c;
- break;
case CHIP_SUMO2:
- chip_id = 0x0100000d;
+ chip_id = 0x0100000c;
break;
case CHIP_PALM:
chip_id = 0x0100000e;
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
- u32 mask;
- int ret;
if (radeon_pcie_gen2 == 0)
return;
if (ASIC_IS_X2(rdev))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
0x15c0, 0x000c0fc0, 0x000c0400
};
+static const u32 hainan_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd8c0, 0xff000fff, 0x00000100,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x00000000,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x03e00000, 0x03600000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f1,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00003210,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+ 0x98f8, 0xffffffff, 0x02010001
+};
+
static const u32 tahiti_mgcg_cgcg_init[] =
{
0xc400, 0xffffffff, 0xfffffffc,
0xd8c0, 0xfffffff0, 0x00000100
};
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
static u32 verde_pg_init[] =
{
0x353c, 0xffffffff, 0x40000,
oland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
break;
+ case CHIP_HAINAN:
+ radeon_program_register_sequence(rdev,
+ hainan_golden_registers,
+ (const u32)ARRAY_SIZE(hainan_golden_registers));
+ radeon_program_register_sequence(rdev,
+ hainan_golden_registers2,
+ (const u32)ARRAY_SIZE(hainan_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ hainan_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ break;
default:
break;
}
{0x0000009f, 0x00a17730}
};
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a07730}
+};
+
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
ucode_size = OLAND_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = OLAND_MC_UCODE_SIZE * 4;
break;
+ case CHIP_HAINAN:
+ chip_name = "HAINAN";
+ rlc_chip_name = "HAINAN";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
default: BUG();
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if ((rdev->family == CHIP_VERDE) ||
- (rdev->family == CHIP_OLAND)) {
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
default:
rdev->config.si.max_shader_engines = 1;
rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_cu_per_sh = 2;
+ rdev->config.si.max_cu_per_sh = 5;
rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_HAINAN:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 1;
+ rdev->config.si.max_texture_channel_caches = 2;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+ break;
}
/* Initialize HDP */
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ if (rdev->has_uvd) {
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ }
si_tiling_mode_table_init(rdev);
if (radeon_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- /* Lockout access through VGA aperture*/
- WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ if (!ASIC_IS_NODCE(rdev))
+ /* Lockout access through VGA aperture*/
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
/* Update configuration */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
evergreen_mc_resume(rdev, &save);
- /* we need to own VRAM, so turn off the VGA renderer here
- * to stop it overwriting our objects */
- rv515_vga_render_disable(rdev);
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it overwriting our objects */
+ rv515_vga_render_disable(rdev);
+ }
}
static void si_vram_gtt_location(struct radeon_device *rdev,
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ if (rdev->num_crtc >= 2) {
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
-
- tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
}
static int si_irq_init(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
- u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 dma_cntl, dma_cntl1;
return 0;
}
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ if (!ASIC_IS_NODCE(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
- WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
- WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ if (rdev->num_crtc >= 2) {
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ }
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ }
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
- WREG32(DC_HPD1_INT_CONTROL, hpd1);
- WREG32(DC_HPD2_INT_CONTROL, hpd2);
- WREG32(DC_HPD3_INT_CONTROL, hpd3);
- WREG32(DC_HPD4_INT_CONTROL, hpd4);
- WREG32(DC_HPD5_INT_CONTROL, hpd5);
- WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ }
return 0;
}
{
u32 tmp;
+ if (ASIC_IS_NODCE(rdev))
+ return;
+
rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
return r;
}
- r = rv770_uvd_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
+ if (rdev->has_uvd) {
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = si_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
- if (!r)
- r = r600_uvd_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ if (rdev->has_uvd) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
}
r = radeon_ib_pool_init(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
- radeon_uvd_suspend(rdev);
+ if (rdev->has_uvd) {
+ r600_uvd_rbc_stop(rdev);
+ radeon_uvd_suspend(rdev);
+ }
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
+ if (rdev->has_uvd) {
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
}
rdev->ih.ring_obj = NULL;
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_uvd_fini(rdev);
+ if (rdev->has_uvd)
+ radeon_uvd_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x634
{
struct drm_pending_vblank_event *event;
struct drm_device *dev = scrtc->crtc.dev;
- struct timeval vblanktime;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = scrtc->event;
scrtc->event = NULL;
+ if (event) {
+ drm_send_vblank_event(dev, 0, event);
+ drm_vblank_put(dev, 0);
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (event == NULL)
- return;
-
- event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
- event->event.tv_sec = vblanktime.tv_sec;
- event->event.tv_usec = vblanktime.tv_usec;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&event->base.link, &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- drm_vblank_put(dev, 0);
}
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have a TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
return err;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(&pdev->dev, "failed to get registers\n");
- return -ENXIO;
- }
-
dc->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
return 0;
}
cur_cpu = (++next_vp % max_cpus);
- return cur_cpu;
+ return hv_context.vp_index[cur_cpu];
}
/*
pr_info("found Abit uGuru\n");
/* Register sysfs hooks */
- for (i = 0; i < sysfs_attr_i; i++)
- if (device_create_file(&pdev->dev,
- &data->sysfs_attr[i].dev_attr))
+ for (i = 0; i < sysfs_attr_i; i++) {
+ res = device_create_file(&pdev->dev,
+ &data->sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
- if (device_create_file(&pdev->dev,
- &abituguru_sysfs_attr[i].dev_attr))
+ }
+ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+ res = device_create_file(&pdev->dev,
+ &abituguru_sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (!IS_ERR(data->hwmon_dev))
return PTR_ERR(channels);
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return -ENOMEM;
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
st->channels = channels;
error_remove_group:
sysfs_remove_group(&dev->kobj, &st->attr_group);
error_release_channels:
- iio_channel_release_all(st->channels);
+ iio_channel_release_all(channels);
return ret;
}
data->have_temp |= 1 << i;
data->have_temp_fixed |= 1 << i;
data->reg_temp[0][i] = reg_temp_alternate[i];
- data->reg_temp[1][i] = reg_temp_over[i];
- data->reg_temp[2][i] = reg_temp_hyst[i];
+ if (i < num_reg_temp) {
+ data->reg_temp[1][i] = reg_temp_over[i];
+ data->reg_temp[2][i] = reg_temp_hyst[i];
+ }
data->temp_src[i] = i + 1;
continue;
}
mutex_lock(&data->update_lock);
next_update = data->last_updated +
- msecs_to_jiffies(data->update_interval) + 1;
+ msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
if (data->kind != tmp432) {
/*
/* Enable the adapter */
__i2c_dw_enable(dev, true);
- /* Enable interrupts */
+ /* Clear and enable interrupts */
+ i2c_dw_clear_int(dev);
dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
}
cmd |= BIT(9);
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+ /* avoid rx buffer overrun */
+ if (rx_limit - dev->rx_outstanding <= 0)
+ break;
+
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
+ dev->rx_outstanding++;
} else
dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
rx_valid = dw_readl(dev, DW_IC_RXFLR);
- for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ dev->rx_outstanding--;
+ }
if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
dev->msg_err = 0;
dev->status = STATUS_IDLE;
dev->abort_source = 0;
+ dev->rx_outstanding = 0;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
* @adapter: i2c subsystem adapter node
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
*/
struct dw_i2c_dev {
struct device *dev;
u32 master_cfg;
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
+ int rx_outstanding;
};
#define ACCESS_SWAP 0x00000001
static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT33C2", 0 },
{ "INT33C3", 0 },
+ { "80860F41", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
static unsigned int disable_features;
module_param(disable_features, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
+ "\t\t 0x01 disable SMBus PEC\n"
+ "\t\t 0x02 disable the block buffer\n"
+ "\t\t 0x08 disable the I2C block read functionality\n"
+ "\t\t 0x10 don't use interrupts ");
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
writel(drv_data->cntl_bits,
drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
drv_data->block = 0;
- wake_up_interruptible(&drv_data->waitq);
+ wake_up(&drv_data->waitq);
break;
case MV64XXX_I2C_ACTION_CONTINUE:
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
drv_data->block = 0;
- wake_up_interruptible(&drv_data->waitq);
+ wake_up(&drv_data->waitq);
break;
case MV64XXX_I2C_ACTION_INVALID:
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
drv_data->block = 0;
- wake_up_interruptible(&drv_data->waitq);
+ wake_up(&drv_data->waitq);
break;
}
}
unsigned long flags;
char abort = 0;
- time_left = wait_event_interruptible_timeout(drv_data->waitq,
+ time_left = wait_event_timeout(drv_data->waitq,
!drv_data->block, drv_data->adapter.timeout);
spin_lock_irqsave(&drv_data->lock, flags);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
i2c->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(i2c->regs))
adap->class = I2C_CLASS_HWMON;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem_res == NULL) {
- dev_err(&pdev->dev, "Unable to get MEM resource\n");
- err = -EINVAL;
- goto out;
- }
-
siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(siic->base)) {
err = PTR_ERR(siic->base);
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no mem resource\n");
- return -EINVAL;
- }
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
}
static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
-static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device);
+static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
+ i2c_sysfs_delete_device);
static struct attribute *i2c_adapter_attrs[] = {
&dev_attr_name.attr,
#ifdef CONFIG_PM_SLEEP
static int exynos_adc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_adc *info = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct exynos_adc *info = iio_priv(indio_dev);
u32 con;
if (info->version == ADC_V2) {
static int exynos_adc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_adc *info = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct exynos_adc *info = iio_priv(indio_dev);
int ret;
ret = regulator_enable(info->vdd);
while (chan->indio_dev) {
if (chan->indio_dev != indio_dev) {
ret = -EINVAL;
- goto error_release_channels;
+ goto error_free_scan_mask;
}
set_bit(chan->channel->scan_index,
cb_buff->buffer.scan_mask);
return cb_buff;
+error_free_scan_mask:
+ kfree(cb_buff->buffer.scan_mask);
error_release_channels:
iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
+ kfree(cb_buff->buffer.scan_mask);
iio_channel_release_all(cb_buff->channels);
kfree(cb_buff);
}
goto read_error;
*val = *val >> ch->scan_type.shift;
+
+ err = st_sensors_set_enable(indio_dev, false);
}
mutex_unlock(&indio_dev->mlock);
config AD5064
tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
- depends on (SPI_MASTER || I2C)
+ depends on (SPI_MASTER && I2C!=m) || I2C
help
Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
config AD5380
tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
- depends on (SPI_MASTER || I2C)
+ depends on (SPI_MASTER && I2C!=m) || I2C
select REGMAP_I2C if I2C
select REGMAP_SPI if SPI_MASTER
help
config AD5446
tristate "Analog Devices AD5446 and similar single channel DACs driver"
- depends on (SPI_MASTER || I2C)
+ depends on (SPI_MASTER && I2C!=m) || I2C
help
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
(pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
- ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9)));
+ ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
st->regs[ADF4350_REG3] = pdata->r3_user_settings &
(ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
channel->indio_dev = indio_dev;
index = iiospec.args_count ? iiospec.args[0] : 0;
if (index >= indio_dev->num_channels) {
- return -EINVAL;
+ err = -EINVAL;
goto err_put;
}
channel->channel = &indio_dev->channels[index];
s64 raw64 = raw;
int ret;
- ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE);
+ ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
if (ret == 0)
raw64 += offset;
if (dma_region) {
struct qib_mregion *tmr;
- tmr = rcu_dereference(dev->dma_mr);
+ tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
qib_get_mr(mr);
rcu_assign_pointer(dev->dma_mr, mr);
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
* maintained by openib-general@openib.org
*
* This software is available to you under a choice of one of two
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
}
/**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the FMR pool and QP objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
int cq_index;
BUG_ON(ib_conn == NULL);
rdma_destroy_qp(ib_conn->cma_id);
}
- /* if cma handler context, the caller acts s.t the cma destroy the id */
- if (ib_conn->cma_id != NULL && can_destroy_id)
- rdma_destroy_id(ib_conn->cma_id);
ib_conn->fmr_pool = NULL;
ib_conn->qp = NULL;
- ib_conn->cma_id = NULL;
kfree(ib_conn->page_vec);
if (ib_conn->login_buf) {
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn, can_destroy_id);
+ iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
+ /* if cma handler context, the caller actually destroys the id */
+ if (ib_conn->cma_id != NULL && can_destroy_id) {
+ rdma_destroy_id(ib_conn->cma_id);
+ ib_conn->cma_id = NULL;
+ }
iscsi_destroy_endpoint(ib_conn->ep);
}
spin_unlock_irq(&sdev->spinlock);
}
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+ struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (ch->in_shutdown) {
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return true;
+ }
+
+ ch->in_shutdown = true;
+ target_sess_cmd_list_set_waiting(se_sess);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ return true;
+}
+
/**
* srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
* @cm_id: Pointer to the CM ID of the channel to be drained.
spin_unlock_irq(&sdev->spinlock);
if (do_reset) {
+ if (ch->sess)
+ srpt_shutdown_session(ch->sess);
+
ret = srpt_ch_qp_err(ch);
if (ret < 0)
printk(KERN_ERR "Setting queue pair in error state"
se_sess = ch->sess;
BUG_ON(!se_sess);
- target_wait_for_sess_cmds(se_sess, 0);
+ target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(se_sess);
transport_deregister_session(se_sess);
spin_unlock_irqrestore(&ch->spinlock, flags);
}
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return true;
-}
-
/**
* srpt_close_session() - Forcibly close a session.
*
u8 sess_name[36];
struct work_struct release_work;
struct completion *release_done;
+ bool in_shutdown;
};
/**
{
struct synaptics_data *priv = psmouse->private;
struct synaptics_data old_priv = *priv;
+ unsigned char param[2];
int retry = 0;
int error;
*/
ssleep(1);
}
+ ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID);
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
- switch (wacom->id[idx] & 0xfffff) {
+ switch (wacom->id[idx]) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x20802: /* Intuos4 Inking Pen */
+ case 0x120802: /* Intuos4/5 Inking Pen */
case 0x012:
wacom->tool[idx] = BTN_TOOL_PENCIL;
break;
case 0x823: /* Intuos3 Grip Pen */
case 0x813: /* Intuos3 Classic Pen */
case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4 General Pen */
- case 0x804: /* Intuos4 Marker Pen */
- case 0x40802: /* Intuos4 Classic Pen */
- case 0x18802: /* DTH2242 Grip Pen */
+ case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x022:
+ case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x160802: /* Cintiq 13HD Pro Pen */
+ case 0x180802: /* DTH2242 Pen */
wacom->tool[idx] = BTN_TOOL_PEN;
break;
case 0x82b: /* Intuos3 Grip Pen Eraser */
case 0x81b: /* Intuos3 Classic Pen Eraser */
case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4 Marker Pen Eraser */
- case 0x80a: /* Intuos4 General Pen Eraser */
- case 0x4080a: /* Intuos4 Classic Pen Eraser */
- case 0x90a: /* Intuos4 Airbrush Eraser */
+ case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
+ case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x18080a: /* DTH2242 Eraser */
wacom->tool[idx] = BTN_TOOL_RUBBER;
break;
case 0x912:
case 0x112:
case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4 Airbrush */
+ case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
break;
input_report_key(input, BTN_8, (data[3] & 0x80));
}
if (data[1] | (data[2] & 0x01) | data[3]) {
- input_report_key(input, wacom->tool[1], 1);
input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
} else if (features->type == DTK) {
input_report_key(input, BTN_3, (data[6] & 0x08));
input_report_key(input, BTN_4, (data[6] & 0x10));
input_report_key(input, BTN_5, (data[6] & 0x20));
+ if (data[6] & 0x3f) {
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_abs(input, ABS_MISC, 0);
+ }
+ } else if (features->type == WACOM_13HD) {
+ input_report_key(input, BTN_0, (data[3] & 0x01));
+ input_report_key(input, BTN_1, (data[4] & 0x01));
+ input_report_key(input, BTN_2, (data[4] & 0x02));
+ input_report_key(input, BTN_3, (data[4] & 0x04));
+ input_report_key(input, BTN_4, (data[4] & 0x08));
+ input_report_key(input, BTN_5, (data[4] & 0x10));
+ input_report_key(input, BTN_6, (data[4] & 0x20));
+ input_report_key(input, BTN_7, (data[4] & 0x40));
+ input_report_key(input, BTN_8, (data[4] & 0x80));
+ if ((data[3] & 0x01) | data[4]) {
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else if (features->type == WACOM_24HD) {
input_report_key(input, BTN_0, (data[6] & 0x01));
input_report_key(input, BTN_1, (data[6] & 0x02));
}
if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
- input_report_key(input, wacom->tool[1], 1);
input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
} else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
}
if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
- input_report_key(input, wacom->tool[1], 1);
input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
} else {
if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
data[2] | (data[3] & 0x1f) | data[4] | data[8] |
(data[7] & 0x01)) {
- input_report_key(input, wacom->tool[1], 1);
input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
}
case INTUOS4L:
case CINTIQ:
case WACOM_BEE:
+ case WACOM_13HD:
case WACOM_21UX2:
case WACOM_22HD:
case WACOM_24HD:
__set_bit(KEY_PROG1, input_dev->keybit);
__set_bit(KEY_PROG2, input_dev->keybit);
__set_bit(KEY_PROG3, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
/* fall through */
case DTK:
for (i = 0; i < 6; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
-
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
wacom_setup_cintiq(wacom_wac);
break;
+ case WACOM_13HD:
+ for (i = 0; i < 9; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ wacom_setup_cintiq(wacom_wac);
+ break;
+
case INTUOS3:
case INTUOS3L:
__set_bit(BTN_4, input_dev->keybit);
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF8 =
{ "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023,
63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x304 =
+ { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023,
+ 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xC7 =
{ "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511,
0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x57 =
+ { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
+ 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
static const struct wacom_features wacom_features_0x59 = /* Pen */
{ "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
static const struct wacom_features wacom_features_0xFA =
{ "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x5B =
+ { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
+ 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
+static const struct wacom_features wacom_features_0x5E =
+ { "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
static const struct wacom_features wacom_features_0x90 =
{ "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
{ USB_DEVICE_WACOM(0x43) },
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
+ { USB_DEVICE_WACOM(0x57) },
{ USB_DEVICE_WACOM(0x59) },
{ USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x5B) },
+ { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
{ USB_DEVICE_WACOM(0x100) },
{ USB_DEVICE_WACOM(0x101) },
{ USB_DEVICE_WACOM(0x10D) },
+ { USB_DEVICE_WACOM(0x304) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
WACOM_24HD,
CINTIQ,
WACOM_BEE,
+ WACOM_13HD,
WACOM_MO,
WIRELESS,
BAMBOO_PT,
input_set_abs_params(input_dev,
ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
input_set_abs_params(input_dev,
- ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0);
+ ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0);
input_set_drvdata(input_dev, ts);
{
u32 irqnr;
- do {
- irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
- if (irqnr != 0x7f) {
- __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
- irqnr = irq_find_mapping(icoll_domain, irqnr);
- handle_IRQ(irqnr, regs);
- continue;
- }
- break;
- } while (1);
+ irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
+ __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+ irqnr = irq_find_mapping(icoll_domain, irqnr);
+ handle_IRQ(irqnr, regs);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(f->valid & BIT(hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_data(irq, f);
irq_set_chip_and_handler(irq, &f->chip,
handle_level_irq);
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(v->valid_sources & (1 << hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
irq_set_chip_data(irq, v->base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
{
- if (contr - 1 >= CAPI_MAXCONTR)
+ if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
return NULL;
return capi_controller[contr - 1];
{
lockdep_assert_held(&capi_controller_lock);
- if (applid - 1 >= CAPI_MAXAPPL)
+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
return NULL;
return capi_applications[applid - 1];
static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
{
- if (applid - 1 >= CAPI_MAXAPPL)
+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
return NULL;
return rcu_dereference(capi_applications[applid - 1]);
return 0;
}
+ ret = devm_gpio_request(parent, template->gpio, template->name);
+ if (ret < 0)
+ return ret;
+
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio,
- (led_dat->active_low ^ state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
return ret;
{
.name = "led_1",
.port = 0x49,
- .mask = BIT(7),
+ .mask = BIT(6),
},
{
.name = "led_2",
.port = 0x49,
- .mask = BIT(6),
+ .mask = BIT(5),
},
{
.name = "led_3",
.port = 0x49,
- .mask = BIT(5),
+ .mask = BIT(4),
},
{
.name = "led_4",
.port = 0x49,
- .mask = BIT(4),
+ .mask = BIT(3),
},
{
.name = "led_5",
.port = 0x49,
- .mask = BIT(3),
+ .mask = BIT(2),
},
{
.name = "led_6",
.port = 0x49,
- .mask = BIT(2),
+ .mask = BIT(1),
},
{
.name = "led_7",
.port = 0x49,
- .mask = BIT(1),
+ .mask = BIT(0),
}
};
kill_guest(&lg->cpus[0],
"Cannot populate switcher mapping");
}
+ lg->pgdirs[pgdir].last_host_cpu = -1;
}
}
*need_commit = false;
- metadata_dev_size = get_metadata_dev_size(pool->md_dev);
+ metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
if (r) {
}
if (metadata_dev_size < sb_metadata_dev_size) {
- DMERR("metadata device (%llu sectors) too small: expected %llu",
+ DMERR("metadata device (%llu blocks) too small: expected %llu",
metadata_dev_size, sb_metadata_dev_size);
return -EINVAL;
struct zoran_mapping {
struct zoran_fh *fh;
- int count;
+ atomic_t count;
};
struct zoran_buffer {
zoran_vm_open (struct vm_area_struct *vma)
{
struct zoran_mapping *map = vma->vm_private_data;
-
- map->count++;
+ atomic_inc(&map->count);
}
static void
struct zoran *zr = fh->zr;
int i;
- if (--map->count > 0)
+ if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
return;
dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
kfree(map);
/* Any buffers still mapped? */
- for (i = 0; i < fh->buffers.num_buffers; i++)
- if (fh->buffers.buffer[i].map)
+ for (i = 0; i < fh->buffers.num_buffers; i++) {
+ if (fh->buffers.buffer[i].map) {
+ mutex_unlock(&zr->resource_lock);
return;
+ }
+ }
dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
__func__, mode_name(fh->map_mode));
- mutex_lock(&zr->resource_lock);
if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
if (fh->buffers.active != ZORAN_FREE) {
goto mmap_unlock_and_return;
}
map->fh = fh;
- map->count = 1;
+ atomic_set(&map->count, 1);
vma->vm_ops = &zoran_vm_ops;
vma->vm_flags |= VM_DONTEXPAND;
struct omap_dss_device *def_display;
struct omap2video_device *vid_dev = NULL;
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
ret = omapdss_compat_init();
if (ret) {
dev_err(&pdev->dev, "failed to init dss\n");
platform_set_drvdata(pdev, emif);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(emif->dev, "%s: error getting memory resource\n",
- __func__);
- goto error;
- }
-
emif->base = devm_ioremap_resource(emif->dev, res);
if (IS_ERR(emif->base))
goto error;
config AB8500_DEBUG
bool "Enable debug info via debugfs"
- depends on AB8500_CORE && DEBUG_FS
+ depends on AB8500_GPADC && DEBUG_FS
default y if DEBUG_FS
help
Select this option if you want debug information using the debug
config MFD_TPS65912
bool "TI TPS65912 Power Management chip"
depends on GPIOLIB
+ select MFD_CORE
help
If you say yes here you get support for the TPS65912 series of
PM chips.
#ifdef CONFIG_DEBUG_FS
static struct resource ab8500_debug_resources[] = {
+ {
+ .name = "IRQ_AB8500",
+ /*
+ * Number will be filled in. NOTE: this is deliberately
+ * not flagged as an IRQ in order to avoid remapping using
+ * the irqdomain in the MFD core, so that this IRQ passes
+ * unremapped to the debug code.
+ */
+ },
{
.name = "IRQ_FIRST",
.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
},
{
.name = "ab8500-gpadc",
+ .of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
.resources = ab8500_gpadc_resources,
},
.of_compatible = "stericsson,ab8500-denc",
},
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab8500",
.of_compatible = "stericsson,ab8500-gpio",
},
{
},
{
.name = "ab8500-gpadc",
+ .of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
.resources = ab8505_gpadc_resources,
},
.name = "ab8500-leds",
},
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab8505",
},
{
.name = "ab8500-usb",
},
{
.name = "ab8500-gpadc",
+ .of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
.resources = ab8505_gpadc_resources,
},
.resources = ab8500_temp_resources,
},
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab8540",
},
{
.name = "ab8540-usb",
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ /* Pass to debugfs */
+ ab8500_debug_resources[0].start = ab8500->irq;
+ ab8500_debug_resources[0].end = ab8500->irq;
+#endif
+
if (is_ab9540(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
ARRAY_SIZE(ab9540_devs), NULL,
#include <linux/ctype.h>
#endif
-/* TODO: this file should not reference IRQ_DB8500_AB8500! */
-#include <mach/irqs.h>
-
static u32 debug_bank;
static u32 debug_address;
+static int irq_ab8500;
static int irq_first;
static int irq_last;
static u32 *irq_count;
{
if (line < num_interrupt_lines) {
num_interrupts[line]++;
- if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500))
+ if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500))
num_wake_interrupts[line]++;
}
}
struct dentry *file;
int ret = -ENOMEM;
struct ab8500 *ab8500;
+ struct resource *res;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
if (!event_name)
goto out_freedev_attr;
+ res = platform_get_resource_byname(plf, 0, "IRQ_AB8500");
+ if (!res) {
+ dev_err(&plf->dev, "AB8500 irq not found\n");
+ ret = -ENXIO;
+ goto out_freeevent_name;
+ }
+ irq_ab8500 = res->start;
+
irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
if (irq_first < 0) {
dev_err(&plf->dev, "First irq not found, err %d\n",
static int ab8500_gpadc_resume(struct device *dev)
{
struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+ int ret;
- regulator_enable(gpadc->regu);
+ ret = regulator_enable(gpadc->regu);
+ if (ret)
+ dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
pm_runtime_mark_last_busy(gpadc->dev);
pm_runtime_put_autosuspend(gpadc->dev);
mutex_unlock(&gpadc->ab8500_gpadc_lock);
- return 0;
+ return ret;
}
static int ab8500_gpadc_probe(struct platform_device *pdev)
static struct device *sysctrl_dev;
-void ab8500_power_off(void)
+static void ab8500_power_off(void)
{
sigset_t old;
sigset_t all;
plat = dev_get_platdata(sysctrl_dev->parent);
pdata = plat->sysctrl;
- if (pdata->reboot_reason_code)
+ if (pdata && pdata->reboot_reason_code)
reason = pdata->reboot_reason_code(cmd);
else
pr_warn("[%s] No reboot reason set. Default reason %d\n",
plat = dev_get_platdata(pdev->dev.parent);
- if (!(plat && plat->sysctrl))
+ if (!plat)
return -EINVAL;
- if (plat->pm_power_off)
+ sysctrl_dev = &pdev->dev;
+
+ if (!pm_power_off)
pm_power_off = ab8500_power_off;
pdata = plat->sysctrl;
-
if (pdata) {
int last, ret, i, j;
static int ab8500_sysctrl_remove(struct platform_device *pdev)
{
sysctrl_dev = NULL;
+
+ if (pm_power_off == ab8500_power_off)
+ pm_power_off = NULL;
+
return 0;
}
void abx500_dump_all_banks(void)
{
struct abx500_ops *ops;
- struct device dummy_child = {0};
+ struct device dummy_child = {NULL};
struct abx500_device_entry *dev_entry;
list_for_each_entry(dev_entry, &abx500_list, list) {
for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
if (*ptr == EC_MSG_HEADER) {
- dev_dbg(ec_dev->dev, "msg found at %ld\n",
+ dev_dbg(ec_dev->dev, "msg found at %zd\n",
ptr - ec_dev->din);
break;
}
* maximum-supported transfer size.
*/
todo = min(need_len, 256);
- dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n",
+ dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
todo, need_len, ptr - ec_dev->din);
memset(&trans, '\0', sizeof(trans));
need_len -= todo;
}
- dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din);
+ dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
return 0;
}
if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
divsel = dsiclk[n].divsel;
+ else
+ dsiclk[n].divsel = divsel;
switch (divsel) {
case PRCM_DSI_PLLOUT_SEL_PHI_4:
.num_resources = ARRAY_SIZE(db8500_thsens_resources),
.resources = db8500_thsens_resources,
.platform_data = &db8500_thsens_data,
+ .pdata_size = sizeof(db8500_thsens_data),
},
};
* the clients via intel_msic_irq_read().
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
- return -ENODEV;
- }
-
msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msic->irq_base))
return PTR_ERR(msic->irq_base);
#include <linux/mfd/si476x-core.h>
+#include <asm/unaligned.h>
+
#define msb(x) ((u8)((u16) x >> 8))
#define lsb(x) ((u8)((u16) x & 0x00FF))
SI476X_ACF_SOFTMUTE_INT = (1 << 0),
SI476X_ACF_SMUTE = (1 << 0),
- SI476X_ACF_SMATTN = 0b11111,
+ SI476X_ACF_SMATTN = 0x1f,
SI476X_ACF_PILOT = (1 << 7),
SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT,
};
if (err < 0)
return err;
else
- return be16_to_cpup((__be16 *)(resp + 2));
+ return get_unaligned_be16(resp + 2);
}
EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
if (!report)
return err;
- report->snrhint = 0b00001000 & resp[1];
- report->snrlint = 0b00000100 & resp[1];
- report->rssihint = 0b00000010 & resp[1];
- report->rssilint = 0b00000001 & resp[1];
+ report->snrhint = 0x08 & resp[1];
+ report->snrlint = 0x04 & resp[1];
+ report->rssihint = 0x02 & resp[1];
+ report->rssilint = 0x01 & resp[1];
- report->bltf = 0b10000000 & resp[2];
- report->snr_ready = 0b00100000 & resp[2];
- report->rssiready = 0b00001000 & resp[2];
- report->afcrl = 0b00000010 & resp[2];
- report->valid = 0b00000001 & resp[2];
+ report->bltf = 0x80 & resp[2];
+ report->snr_ready = 0x20 & resp[2];
+ report->rssiready = 0x08 & resp[2];
+ report->afcrl = 0x02 & resp[2];
+ report->valid = 0x01 & resp[2];
- report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->readfreq = get_unaligned_be16(resp + 3);
report->freqoff = resp[5];
report->rssi = resp[6];
report->snr = resp[7];
if (err < 0 || report == NULL)
return err;
- report->rdstpptyint = 0b00010000 & resp[1];
- report->rdspiint = 0b00001000 & resp[1];
- report->rdssyncint = 0b00000010 & resp[1];
- report->rdsfifoint = 0b00000001 & resp[1];
+ report->rdstpptyint = 0x10 & resp[1];
+ report->rdspiint = 0x08 & resp[1];
+ report->rdssyncint = 0x02 & resp[1];
+ report->rdsfifoint = 0x01 & resp[1];
- report->tpptyvalid = 0b00010000 & resp[2];
- report->pivalid = 0b00001000 & resp[2];
- report->rdssync = 0b00000010 & resp[2];
- report->rdsfifolost = 0b00000001 & resp[2];
+ report->tpptyvalid = 0x10 & resp[2];
+ report->pivalid = 0x08 & resp[2];
+ report->rdssync = 0x02 & resp[2];
+ report->rdsfifolost = 0x01 & resp[2];
- report->tp = 0b00100000 & resp[3];
- report->pty = 0b00011111 & resp[3];
+ report->tp = 0x20 & resp[3];
+ report->pty = 0x1f & resp[3];
- report->pi = be16_to_cpup((__be16 *)(resp + 4));
+ report->pi = get_unaligned_be16(resp + 4);
report->rdsfifoused = resp[6];
- report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7];
- report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7];
- report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7];
- report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_A] = 0xc0 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_B] = 0x30 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_C] = 0x0c & resp[7];
+ report->ble[V4L2_RDS_BLOCK_D] = 0x03 & resp[7];
report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
SI476X_DEFAULT_TIMEOUT);
if (!err) {
- report->expected = be16_to_cpup((__be16 *)(resp + 2));
- report->received = be16_to_cpup((__be16 *)(resp + 4));
- report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6));
+ report->expected = get_unaligned_be16(resp + 2);
+ report->received = get_unaligned_be16(resp + 4);
+ report->uncorrectable = get_unaligned_be16(resp + 6);
}
return err;
{
u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP];
const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
- mode & 0b111,
+ mode & 0x07,
};
return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
const int am_freq = tuneargs->freq;
u8 resp[CMD_AM_TUNE_FREQ_NRESP];
const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
- (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11),
+ (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03),
msb(am_freq),
lsb(am_freq),
};
if (err < 0 || report == NULL)
return err;
- report->multhint = 0b10000000 & resp[1];
- report->multlint = 0b01000000 & resp[1];
- report->snrhint = 0b00001000 & resp[1];
- report->snrlint = 0b00000100 & resp[1];
- report->rssihint = 0b00000010 & resp[1];
- report->rssilint = 0b00000001 & resp[1];
+ report->multhint = 0x80 & resp[1];
+ report->multlint = 0x40 & resp[1];
+ report->snrhint = 0x08 & resp[1];
+ report->snrlint = 0x04 & resp[1];
+ report->rssihint = 0x02 & resp[1];
+ report->rssilint = 0x01 & resp[1];
- report->bltf = 0b10000000 & resp[2];
- report->snr_ready = 0b00100000 & resp[2];
- report->rssiready = 0b00001000 & resp[2];
- report->afcrl = 0b00000010 & resp[2];
- report->valid = 0b00000001 & resp[2];
+ report->bltf = 0x80 & resp[2];
+ report->snr_ready = 0x20 & resp[2];
+ report->rssiready = 0x08 & resp[2];
+ report->afcrl = 0x02 & resp[2];
+ report->valid = 0x01 & resp[2];
- report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->readfreq = get_unaligned_be16(resp + 3);
report->freqoff = resp[5];
report->rssi = resp[6];
report->snr = resp[7];
report->hassi = resp[10];
report->mult = resp[11];
report->dev = resp[12];
- report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->readantcap = get_unaligned_be16(resp + 13);
report->assi = resp[15];
report->usn = resp[16];
if (err < 0 || report == NULL)
return err;
- report->multhint = 0b10000000 & resp[1];
- report->multlint = 0b01000000 & resp[1];
- report->snrhint = 0b00001000 & resp[1];
- report->snrlint = 0b00000100 & resp[1];
- report->rssihint = 0b00000010 & resp[1];
- report->rssilint = 0b00000001 & resp[1];
+ report->multhint = 0x80 & resp[1];
+ report->multlint = 0x40 & resp[1];
+ report->snrhint = 0x08 & resp[1];
+ report->snrlint = 0x04 & resp[1];
+ report->rssihint = 0x02 & resp[1];
+ report->rssilint = 0x01 & resp[1];
- report->bltf = 0b10000000 & resp[2];
- report->snr_ready = 0b00100000 & resp[2];
- report->rssiready = 0b00001000 & resp[2];
- report->afcrl = 0b00000010 & resp[2];
- report->valid = 0b00000001 & resp[2];
+ report->bltf = 0x80 & resp[2];
+ report->snr_ready = 0x20 & resp[2];
+ report->rssiready = 0x08 & resp[2];
+ report->afcrl = 0x02 & resp[2];
+ report->valid = 0x01 & resp[2];
- report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->readfreq = get_unaligned_be16(resp + 3);
report->freqoff = resp[5];
report->rssi = resp[6];
report->snr = resp[7];
report->hassi = resp[10];
report->mult = resp[11];
report->dev = resp[12];
- report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->readantcap = get_unaligned_be16(resp + 13);
report->assi = resp[15];
report->usn = resp[16];
if (err < 0 || report == NULL)
return err;
- report->multhint = 0b10000000 & resp[1];
- report->multlint = 0b01000000 & resp[1];
- report->snrhint = 0b00001000 & resp[1];
- report->snrlint = 0b00000100 & resp[1];
- report->rssihint = 0b00000010 & resp[1];
- report->rssilint = 0b00000001 & resp[1];
-
- report->bltf = 0b10000000 & resp[2];
- report->snr_ready = 0b00100000 & resp[2];
- report->rssiready = 0b00001000 & resp[2];
- report->injside = 0b00000100 & resp[2];
- report->afcrl = 0b00000010 & resp[2];
- report->valid = 0b00000001 & resp[2];
-
- report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->multhint = 0x80 & resp[1];
+ report->multlint = 0x40 & resp[1];
+ report->snrhint = 0x08 & resp[1];
+ report->snrlint = 0x04 & resp[1];
+ report->rssihint = 0x02 & resp[1];
+ report->rssilint = 0x01 & resp[1];
+
+ report->bltf = 0x80 & resp[2];
+ report->snr_ready = 0x20 & resp[2];
+ report->rssiready = 0x08 & resp[2];
+ report->injside = 0x04 & resp[2];
+ report->afcrl = 0x02 & resp[2];
+ report->valid = 0x01 & resp[2];
+
+ report->readfreq = get_unaligned_be16(resp + 3);
report->freqoff = resp[5];
report->rssi = resp[6];
report->snr = resp[7];
report->hassi = resp[10];
report->mult = resp[11];
report->dev = resp[12];
- report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->readantcap = get_unaligned_be16(resp + 13);
report->assi = resp[15];
report->usn = resp[16];
report->rdsdev = resp[18];
report->assidev = resp[19];
report->strongdev = resp[20];
- report->rdspi = be16_to_cpup((__be16 *)(resp + 21));
+ report->rdspi = get_unaligned_be16(resp + 21);
return err;
}
ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_dbg(&pdev->dev, "no mmio resource defined\n");
- return -ENXIO;
- }
-
ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(ssc->regs))
return PTR_ERR(ssc->regs);
#include <linux/irq.h>
#include <linux/interrupt.h>
-static int irq;
+static int irq = -1;
static irqreturn_t dummy_interrupt(int irq, void *dev_id)
{
static int __init dummy_irq_init(void)
{
+ if (irq < 0) {
+ printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n");
+ return -EIO;
+ }
if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
return -EIO;
}
}
+ device->event_cb = NULL;
+
mutex_unlock(&dev->device_lock);
if (!device->ops || !device->ops->disable)
/* find ME client we're trying to connect to */
i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
+ if (i < 0 || dev->me_clients[i].props.fixed_address) {
+ dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+ &data->in_client_uuid);
+ rets = -ENODEV;
+ goto end;
}
+ cl->me_client_id = dev->me_clients[i].client_id;
+ cl->state = MEI_FILE_CONNECTING;
+
dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
cl->me_client_id);
dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
goto end;
}
- if (cl->state != MEI_FILE_CONNECTING) {
- rets = -ENODEV;
- goto end;
- }
-
/* prepare the output buffer */
client = &data->out_client_properties;
rets = mei_cl_connect(cl, file);
end:
- dev_dbg(&dev->pdev->dev, "free connect cb memory.");
return rets;
}
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI && NET
+ depends on X86 && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/socket.h>
+#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
mmc_free_host(slot->mmc);
}
-static bool atmci_filter(struct dma_chan *chan, void *slave)
+static bool atmci_filter(struct dma_chan *chan, void *pdata)
{
- struct mci_dma_data *sl = slave;
+ struct mci_platform_data *sl_pdata = pdata;
+ struct mci_dma_data *sl;
+ if (!sl_pdata)
+ return false;
+
+ sl = sl_pdata->dma_slave;
if (sl && find_slave_dev(sl) == chan->device->dev) {
chan->private = slave_data_ptr(sl);
return true;
static bool atmci_configure_dma(struct atmel_mci *host)
{
struct mci_platform_data *pdata;
+ dma_cap_mask_t mask;
if (host == NULL)
return false;
pdata = host->pdev->dev.platform_data;
- if (!pdata)
- return false;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) {
- dma_cap_mask_t mask;
-
- /* Try to grab a DMA channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma.chan =
- dma_request_channel(mask, atmci_filter, pdata->dma_slave);
- }
+ host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
+ &host->pdev->dev, "rxtx");
if (!host->dma.chan) {
dev_warn(&host->pdev->dev, "no DMA channel available\n");
return false;
struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
+ int ret;
pm_runtime_get_sync(mmc_dev(mmc));
break;
case MMC_POWER_ON:
if (!IS_ERR(mmc->supply.vqmmc) &&
- !regulator_is_enabled(mmc->supply.vqmmc))
- regulator_enable(mmc->supply.vqmmc);
+ !regulator_is_enabled(mmc->supply.vqmmc)) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc regulator\n");
+ }
pwr |= MCI_PWR_ON;
break;
*/
struct regulator *vcc;
struct regulator *vcc_aux;
+ int pbias_disable;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
if (!host->vcc)
return 0;
/*
- * With DT, never turn OFF the regulator. This is because
+ * With DT, never turn OFF the regulator for MMC1. This is because
* the pbias cell programming support is still missing when
* booting with Device tree
*/
- if (dev->of_node && !vdd)
+ if (host->pbias_disable && !vdd)
return 0;
if (mmc_slot(host).before_set_reg)
(ios->vdd == DUAL_VOLT_OCR_BIT) &&
/*
* With pbias cell programming missing, this
- * can't be allowed when booting with device
+ * can't be allowed on MMC1 when booting with device
* tree.
*/
- !host->dev->of_node) {
+ !host->pbias_disable) {
/*
* The mmc_select_voltage fn of the core does
* not seem to set the power_mode to
omap_hsmmc_context_save(host);
+ /* This can be removed once we support PBIAS with DT */
+ if (host->dev->of_node && host->mapbase == 0x4809c000)
+ host->pbias_disable = 1;
+
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
/*
* MMC can still work without debounce clock.
omap_hsmmc_conf_bus_power(host);
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
- ret = -ENXIO;
- goto err_irq;
- }
- tx_req = res->start;
+ if (!pdev->dev.of_node) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+ ret = -ENXIO;
+ goto err_irq;
+ }
+ tx_req = res->start;
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
- ret = -ENXIO;
- goto err_irq;
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+ ret = -ENXIO;
+ goto err_irq;
+ }
+ rx_req = res->start;
}
- rx_req = res->start;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+ host->rx_chan =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &rx_req, &pdev->dev, "rx");
+
if (!host->rx_chan) {
dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
ret = -ENXIO;
goto err_irq;
}
- host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+ host->tx_chan =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &tx_req, &pdev->dev, "tx");
+
if (!host->tx_chan) {
dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
ret = -ENXIO;
.enable_dma = sdhci_acpi_enable_dma,
};
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .caps2 = MMC_CAP2_HC_ERASE_SZ,
+ .flags = SDHCI_ACPI_RUNTIME_PM,
+};
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
.pm_caps = MMC_PM_KEEP_POWER,
};
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+};
+
+struct sdhci_acpi_uid_slot {
+ const char *hid;
+ const char *uid;
+ const struct sdhci_acpi_slot *slot;
+};
+
+static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+ { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+ { "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
+ { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
+ { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "PNP0D40" },
+ { },
+};
+
static const struct acpi_device_id sdhci_acpi_ids[] = {
- { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio },
- { "PNP0D40" },
+ { "80860F14" },
+ { "INT33BB" },
+ { "INT33C6" },
+ { "PNP0D40" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
+ const char *uid)
{
- const struct acpi_device_id *id;
-
- for (id = sdhci_acpi_ids; id->id[0]; id++)
- if (!strcmp(id->id, hid))
- return (const struct sdhci_acpi_slot *)id->driver_data;
+ const struct sdhci_acpi_uid_slot *u;
+
+ for (u = sdhci_acpi_uids; u->hid; u++) {
+ if (strcmp(u->hid, hid))
+ continue;
+ if (!u->uid)
+ return u->slot;
+ if (uid && !strcmp(u->uid, uid))
+ return u->slot;
+ }
return NULL;
}
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
+ const char *hid)
+{
+ const struct sdhci_acpi_slot *slot;
+ struct acpi_device_info *info;
+ const char *uid = NULL;
+ acpi_status status;
+
+ status = acpi_get_object_info(handle, &info);
+ if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID))
+ uid = info->unique_id.string;
+
+ slot = sdhci_acpi_get_slot_by_ids(hid, uid);
+
+ kfree(info);
+ return slot;
+}
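
The new slot lookup keys on both the ACPI _HID and, when available, the _UID, so a single HID such as 80860F14 can map to different slot configurations (eMMC vs SD). A small user-space sketch of the same matching rule; the table contents and helper names below are illustrative stand-ins, not the driver structures:

    #include <stdio.h>
    #include <string.h>

    struct uid_slot {
            const char *hid;
            const char *uid;
            const char *slot;   /* stands in for the slot descriptor */
    };

    static const struct uid_slot uids[] = {
            { "80860F14", "1",  "emmc" },
            { "80860F14", "3",  "sd"   },
            { "INT33C6",  NULL, "sdio" },   /* NULL uid matches any uid */
            { NULL, NULL, NULL },
    };

    static const char *get_slot_by_ids(const char *hid, const char *uid)
    {
            const struct uid_slot *u;

            for (u = uids; u->hid; u++) {
                    if (strcmp(u->hid, hid))
                            continue;
                    if (!u->uid)
                            return u->slot;
                    if (uid && !strcmp(u->uid, uid))
                            return u->slot;
            }
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", get_slot_by_ids("80860F14", "1"));  /* emmc */
            printf("%s\n", get_slot_by_ids("80860F14", "3"));  /* sd */
            printf("%s\n", get_slot_by_ids("INT33C6", "7"));   /* sdio */
            return 0;
    }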
+
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid);
+ c->slot = sdhci_acpi_get_slot(handle, hid);
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
goto err_free;
if (c->use_runtime_pm) {
+ pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ enum {
+ NO_CMD_PENDING, /* no multiblock command pending */
+ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
+ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
+ } multiblock_status;
+
};
static struct platform_device_id imx_esdhc_devtype[] = {
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 val = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_CAPABILITIES)) {
val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
val |= SDHCI_INT_ADMA_ERROR;
}
+
+ /*
+ * mask off the interrupt we get in response to the manually
+ * sent CMD12
+ */
+ if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
+ ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
+ val &= ~SDHCI_INT_RESPONSE;
+ writel(SDHCI_INT_RESPONSE, host->ioaddr +
+ SDHCI_INT_STATUS);
+ imx_data->multiblock_status = NO_CMD_PENDING;
+ }
}
return val;
v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+
+ if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) {
+ /* send a manual CMD12 with RESPTYP=none */
+ data = MMC_STOP_TRANSMISSION << 24 |
+ SDHCI_CMD_ABORTCMD << 16;
+ writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
+ imx_data->multiblock_status = WAIT_FOR_INT;
+ }
}
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
}
return;
case SDHCI_COMMAND:
- if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
- host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
- (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
val |= SDHCI_CMD_ABORTCMD;
+ if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+ (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
+
if (is_imx6q_usdhc(imx_data))
writel(val << 16,
host->ioaddr + SDHCI_TRANSFER_MODE);
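
As far as the hunks above show, multiblock_status is a small three-state machine: issuing CMD23 (SET_BLOCK_COUNT) on an affected controller marks a multiblock transfer as in process, the write path then sends a manual CMD12 with RESPTYP=none and moves to WAIT_FOR_INT, and the read path swallows the resulting response interrupt and returns to idle. A compact user-space model of those transitions (the event names are invented for this sketch, they are not driver symbols):

    #include <stdio.h>

    enum mb_state { NO_CMD_PENDING, MULTIBLK_IN_PROCESS, WAIT_FOR_INT };

    enum mb_event { EV_SET_BLOCK_COUNT, EV_MANUAL_CMD12_SENT, EV_RESPONSE_IRQ };

    static enum mb_state step(enum mb_state s, enum mb_event e)
    {
            switch (s) {
            case NO_CMD_PENDING:
                    return e == EV_SET_BLOCK_COUNT ? MULTIBLK_IN_PROCESS : s;
            case MULTIBLK_IN_PROCESS:
                    return e == EV_MANUAL_CMD12_SENT ? WAIT_FOR_INT : s;
            case WAIT_FOR_INT:
                    /* the response interrupt is masked off, back to idle */
                    return e == EV_RESPONSE_IRQ ? NO_CMD_PENDING : s;
            }
            return s;
    }

    int main(void)
    {
            enum mb_state s = NO_CMD_PENDING;

            s = step(s, EV_SET_BLOCK_COUNT);
            s = step(s, EV_MANUAL_CMD12_SENT);
            s = step(s, EV_RESPONSE_IRQ);
            printf("final state: %d (0 == NO_CMD_PENDING)\n", s);
            return 0;
    }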
/*
* Do not touch buswidth bits here. This is done in
* esdhc_pltfm_bus_width.
+ * Do not touch the D3CD bit either which is used for the
+ * SDIO interrupt errata workaround.
*/
- mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK;
+ mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
esdhc_clrset_le(host, mask, new_val, reg);
return;
*/
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
/*
* PCI registers
.probe_slot = pch_hc_probe_slot,
};
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
+ slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+ return 0;
+}
+
+static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+ .allow_runtime_pm = true,
+ .probe_slot = byt_emmc_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .allow_runtime_pm = true,
+ .probe_slot = byt_sdio_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+};
+
/* O2Micro extra registers */
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_MULTI_VCC3V 0xEE
.driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
}
rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (rc == NULL) {
- dev_err(&pdev->dev, "No memory resource found for device!\r\n");
- return -ENXIO;
- }
-
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
}
/**
- * bond_3ad_get_active_agg_info - get information of the active aggregator
+ * __bond_3ad_get_active_agg_info - get information of the active aggregator
* @bond: bonding struct to work on
* @ad_info: ad_info struct to fill with the bond's info
*
* Returns: 0 on success
* < 0 on error
*/
-int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+ struct ad_info *ad_info)
{
struct aggregator *aggregator = NULL;
struct port *port;
return -1;
}
+/* Wrapper used to hold bond->lock so no slave manipulation can occur */
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+{
+ int ret;
+
+ read_lock(&bond->lock);
+ ret = __bond_3ad_get_active_agg_info(bond, ad_info);
+ read_unlock(&bond->lock);
+
+ return ret;
+}
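
Splitting __bond_3ad_get_active_agg_info() (caller already holds the lock) from a wrapping bond_3ad_get_active_agg_info() that takes bond->lock itself is the usual kernel double-underscore convention: internal paths that already hold the read lock call the bare variant, external callers use the wrapper. A user-space sketch of the same shape with a pthread rwlock; all names and the "aggregator info" are illustrative, and the sketch is built with -lpthread:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int active_agg_id = 7;   /* stands in for the aggregator info */

    /* __variant: caller must already hold the read lock */
    static int __get_active_agg_info(int *out)
    {
            *out = active_agg_id;
            return 0;
    }

    /* wrapper: takes and releases the lock around the __variant */
    static int get_active_agg_info(int *out)
    {
            int ret;

            pthread_rwlock_rdlock(&lock);
            ret = __get_active_agg_info(out);
            pthread_rwlock_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            int id;

            if (!get_active_agg_info(&id))
                    printf("active aggregator: %d\n", id);
            return 0;
    }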
+
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
{
struct slave *slave, *start_at;
struct ad_info ad_info;
int res = 1;
- if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
- pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
goto out;
}
void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+ struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
slave->dev->features,
mask);
}
+ features = netdev_add_tso_features(features, mask);
out:
read_unlock(&bond->lock);
{
struct sk_buff *skb;
- pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
- slave_dev->name, dest_ip, src_ip, vlan_id);
+ pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
+ slave_dev->name, &dest_ip, &src_ip, vlan_id);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
__be32 addr;
if (!targets[i])
break;
- pr_debug("basa: target %x\n", targets[i]);
+ pr_debug("basa: target %pI4\n", &targets[i]);
if (!bond_vlan_used(bond)) {
pr_debug("basa: empty vlan: arp_send\n");
addr = bond_confirm_addr(bond->dev, targets[i], 0);
static int bond_check_params(struct bond_params *params)
{
- int arp_validate_value, fail_over_mac_value, primary_reselect_value;
+ int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
/*
* Convert string parameters.
arp_interval = BOND_LINK_ARP_INTERV;
}
- for (arp_ip_count = 0;
- (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count];
- arp_ip_count++) {
+ for (arp_ip_count = 0, i = 0;
+ (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
/* not complete check, but should be good enough to
catch mistakes */
- __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
- if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
- ip == 0 || ip == htonl(INADDR_BROADCAST)) {
+ __be32 ip = in_aton(arp_ip_target[i]);
+ if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
+ ip == htonl(INADDR_BROADCAST)) {
pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
- arp_ip_target[arp_ip_count]);
+ arp_ip_target[i]);
arp_interval = 0;
} else {
- arp_target[arp_ip_count] = ip;
+ arp_target[arp_ip_count++] = ip;
}
}
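
With the old loop a bad arp_ip_target entry left a hole in arp_target[] and still counted toward arp_ip_count, because the same index was used for reading the parameter and writing the table. The rewrite reads with i and only advances arp_ip_count for accepted addresses, so stored targets stay contiguous and the count matches. A small sketch of that two-index compaction (the validation is reduced to a placeholder):

    #include <stdio.h>

    #define MAX_TARGETS 16

    static int is_valid(int v)
    {
            return v > 0;   /* placeholder for the real IP validation */
    }

    int main(void)
    {
            int input[] = { 10, -1, 20, 0 };        /* 0 terminates, -1 is bad */
            int targets[MAX_TARGETS];
            int count = 0, i;

            for (i = 0; count < MAX_TARGETS && input[i]; i++) {
                    if (!is_valid(input[i]))
                            fprintf(stderr, "skipping bad target %d\n", input[i]);
                    else
                            targets[count++] = input[i];    /* stays contiguous */
            }

            for (i = 0; i < count; i++)
                    printf("targets[%d] = %d\n", i, targets[i]);
            return 0;
    }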
if (miimon) {
pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
- int i;
-
pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
arp_interval,
arp_validate_tbl[arp_validate_value].modename,
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
ad_select_tbl[bond->params.ad_select].modename);
- if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
bond->dev->name);
} else {
int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (bond->dev->flags & IFF_UP) {
pr_err("unable to update mode of %s because interface is up.\n",
bond->dev->name);
bond->dev->name, bond_mode_tbl[new_value].modename,
new_value);
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
-
/*
* Show current 802.3ad aggregator ID.
*/
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
- (bond_3ad_get_active_agg_info(bond, &ad_info))
+ bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.aggregator_id);
}
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
- (bond_3ad_get_active_agg_info(bond, &ad_info))
+ bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.ports);
}
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
- (bond_3ad_get_active_agg_info(bond, &ad_info))
+ bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.actor_key);
}
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
- (bond_3ad_get_active_agg_info(bond, &ad_info))
+ bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.partner_key);
}
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
- depends on CAIF
+ depends on CAIF && HAS_DMA
select VHOST_RING
select VIRTIO
select GENERIC_ALLOCATOR
{
struct esd_usb2 *dev = priv->usb2;
struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg msg;
+ struct esd_usb2_msg *msg;
int err, i;
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
/*
* Enable all IDs
* The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
* the number of the starting bitmask (0..64) to the filter.option
* field followed by only some bitmasks.
*/
- msg.msg.hdr.cmd = CMD_IDADD;
- msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg.msg.filter.net = priv->index;
- msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+ msg->msg.hdr.cmd = CMD_IDADD;
+ msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+ msg->msg.filter.net = priv->index;
+ msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
- msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+ msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
/* enable 29bit extended IDs */
- msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+ msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
- err = esd_usb2_send_msg(dev, &msg);
+ err = esd_usb2_send_msg(dev, msg);
if (err)
- goto failed;
+ goto out;
err = esd_usb2_setup_rx_urbs(dev);
if (err)
- goto failed;
+ goto out;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- return 0;
-
-failed:
+out:
if (err == -ENODEV)
netif_device_detach(netdev);
+ if (err)
+ netdev_err(netdev, "couldn't start device: %d\n", err);
- netdev_err(netdev, "couldn't start device: %d\n", err);
-
+ kfree(msg);
return err;
}
static int esd_usb2_close(struct net_device *netdev)
{
struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg msg;
+ struct esd_usb2_msg *msg;
int i;
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
/* Disable all IDs (see esd_usb2_start()) */
- msg.msg.hdr.cmd = CMD_IDADD;
- msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg.msg.filter.net = priv->index;
- msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+ msg->msg.hdr.cmd = CMD_IDADD;
+ msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+ msg->msg.filter.net = priv->index;
+ msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
- msg.msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+ msg->msg.filter.mask[i] = 0;
+ if (esd_usb2_send_msg(priv->usb2, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
- msg.msg.hdr.len = 2;
- msg.msg.hdr.cmd = CMD_SETBAUD;
- msg.msg.setbaud.net = priv->index;
- msg.msg.setbaud.rsvd = 0;
- msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+ msg->msg.hdr.len = 2;
+ msg->msg.hdr.cmd = CMD_SETBAUD;
+ msg->msg.setbaud.net = priv->index;
+ msg->msg.setbaud.rsvd = 0;
+ msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
+ if (esd_usb2_send_msg(priv->usb2, msg) < 0)
netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
close_candev(netdev);
+ kfree(msg);
+
return 0;
}
{
struct esd_usb2_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg msg;
+ struct esd_usb2_msg *msg;
+ int err;
u32 canbtr;
int sjw_shift;
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
canbtr |= ESD_USB2_3_SAMPLES;
- msg.msg.hdr.len = 2;
- msg.msg.hdr.cmd = CMD_SETBAUD;
- msg.msg.setbaud.net = priv->index;
- msg.msg.setbaud.rsvd = 0;
- msg.msg.setbaud.baud = cpu_to_le32(canbtr);
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->msg.hdr.len = 2;
+ msg->msg.hdr.cmd = CMD_SETBAUD;
+ msg->msg.setbaud.net = priv->index;
+ msg->msg.setbaud.rsvd = 0;
+ msg->msg.setbaud.baud = cpu_to_le32(canbtr);
netdev_info(netdev, "setting BTR=%#x\n", canbtr);
- return esd_usb2_send_msg(priv->usb2, &msg);
+ err = esd_usb2_send_msg(priv->usb2, msg);
+
+ kfree(msg);
+ return err;
}
static int esd_usb2_get_berr_counter(const struct net_device *netdev,
const struct usb_device_id *id)
{
struct esd_usb2 *dev;
- struct esd_usb2_msg msg;
+ struct esd_usb2_msg *msg;
int i, err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
usb_set_intfdata(intf, dev);
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto free_msg;
+ }
+
/* query number of CAN interfaces (nets) */
- msg.msg.hdr.cmd = CMD_VERSION;
- msg.msg.hdr.len = 2;
- msg.msg.version.rsvd = 0;
- msg.msg.version.flags = 0;
- msg.msg.version.drv_version = 0;
+ msg->msg.hdr.cmd = CMD_VERSION;
+ msg->msg.hdr.len = 2;
+ msg->msg.version.rsvd = 0;
+ msg->msg.version.flags = 0;
+ msg->msg.version.drv_version = 0;
- err = esd_usb2_send_msg(dev, &msg);
+ err = esd_usb2_send_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "sending version message failed\n");
- goto free_dev;
+ goto free_msg;
}
- err = esd_usb2_wait_msg(dev, &msg);
+ err = esd_usb2_wait_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "no version message answer\n");
- goto free_dev;
+ goto free_msg;
}
- dev->net_count = (int)msg.msg.version_reply.nets;
- dev->version = le32_to_cpu(msg.msg.version_reply.version);
+ dev->net_count = (int)msg->msg.version_reply.nets;
+ dev->version = le32_to_cpu(msg->msg.version_reply.version);
if (device_create_file(&intf->dev, &dev_attr_firmware))
dev_err(&intf->dev,
for (i = 0; i < dev->net_count; i++)
esd_usb2_probe_one_net(intf, i);
- return 0;
-
-free_dev:
- kfree(dev);
+free_msg:
+ kfree(msg);
+ if (err)
+ kfree(dev);
done:
return err;
}
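
The esd_usb2 changes replace on-stack struct esd_usb2_msg instances with kmalloc()'d buffers, since buffers handed to the USB core must come from DMA-capable memory rather than the stack, and they funnel every exit through one label so the buffer is always freed. A generic user-space sketch of that allocate/goto-out/free shape; malloc stands in for kmalloc and the send step is a stub, so this illustrates the control flow only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg { unsigned char payload[64]; };

    static int send_msg(const struct msg *m)
    {
            (void)m;
            return 0;       /* stub for the real USB transfer */
    }

    static int start_device(void)
    {
            struct msg *msg;
            int err;

            msg = malloc(sizeof(*msg));     /* kmalloc(..., GFP_KERNEL) in the driver */
            if (!msg)
                    return -1;

            memset(msg->payload, 0xff, sizeof(msg->payload));
            err = send_msg(msg);
            if (err)
                    goto out;

            err = send_msg(msg);            /* second command reuses the buffer */
    out:
            if (err)
                    fprintf(stderr, "couldn't start device: %d\n", err);
            free(msg);                      /* the single exit path always frees */
            return err;
    }

    int main(void)
    {
            return start_device();
    }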
#define KVASER_CTRL_MODE_SELFRECEPTION 3
#define KVASER_CTRL_MODE_OFF 4
+/* log message */
+#define KVASER_EXTENDED_FRAME BIT(31)
+
struct kvaser_msg_simple {
u8 tid;
u8 channel;
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
+ if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
+ (msg->id == CMD_LOG_MESSAGE)) {
+ kvaser_usb_rx_error(dev, msg);
+ return;
+ } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
kvaser_usb_rx_can_err(priv, msg);
return;
} else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
return;
}
- cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
- (msg->u.rx_can.msg[1] & 0x3f);
- cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+ if (msg->id == CMD_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(msg->u.log_message.id);
+ if (cf->can_id & KVASER_EXTENDED_FRAME)
+ cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ cf->can_id &= CAN_SFF_MASK;
- if (msg->id == CMD_RX_EXT_MESSAGE) {
- cf->can_id <<= 18;
- cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
- ((msg->u.rx_can.msg[3] & 0xff) << 6) |
- (msg->u.rx_can.msg[4] & 0x3f);
- cf->can_id |= CAN_EFF_FLAG;
- }
+ cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
- if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+ if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &msg->u.log_message.data,
+ cf->can_dlc);
+ } else {
+ cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+ (msg->u.rx_can.msg[1] & 0x3f);
+
+ if (msg->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+ ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+ (msg->u.rx_can.msg[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+ if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &msg->u.rx_can.msg[6],
+ cf->can_dlc);
+ }
netif_rx(skb);
case CMD_RX_STD_MESSAGE:
case CMD_RX_EXT_MESSAGE:
+ case CMD_LOG_MESSAGE:
kvaser_usb_rx_can_msg(dev, msg);
break;
kvaser_usb_rx_error(dev, msg);
break;
- case CMD_LOG_MESSAGE:
- if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
- kvaser_usb_rx_error(dev, msg);
- break;
-
case CMD_TX_ACKNOWLEDGE:
kvaser_usb_tx_acknowledge(dev, msg);
break;
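
CMD_LOG_MESSAGE entries carry the CAN identifier as a little-endian 32-bit word with bit 31 flagging an extended (29-bit) frame, which is why the new code masks with CAN_EFF_MASK | CAN_EFF_FLAG or CAN_SFF_MASK depending on that bit. A user-space sketch of that ID decoding; the mask values are the standard SocketCAN definitions, the helper name is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define KVASER_EXTENDED_FRAME   (1u << 31)
    #define CAN_EFF_FLAG            0x80000000u     /* extended frame format */
    #define CAN_EFF_MASK            0x1fffffffu     /* 29-bit identifier */
    #define CAN_SFF_MASK            0x000007ffu     /* 11-bit identifier */

    static uint32_t decode_log_id(uint32_t raw)
    {
            if (raw & KVASER_EXTENDED_FRAME)
                    return raw & (CAN_EFF_MASK | CAN_EFF_FLAG);
            return raw & CAN_SFF_MASK;
    }

    int main(void)
    {
            printf("0x%08x\n", decode_log_id(0x80000123u)); /* extended: keeps EFF flag */
            printf("0x%08x\n", decode_log_id(0x00000123u)); /* standard: 11-bit mask */
            return 0;
    }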
return usb_submit_urb(urb, GFP_ATOMIC);
}
-static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
{
- u8 buffer[16];
+ u8 *buffer;
+ int err;
+
+ buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
buffer[0] = 0;
buffer[1] = !!loaded;
- pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
- PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer));
+ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
+ PCAN_USBPRO_FCT_DRVLD, buffer,
+ PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
+ kfree(buffer);
+
+ return err;
}
static inline
*/
static int pcan_usb_pro_init(struct peak_usb_device *dev)
{
- struct pcan_usb_pro_interface *usb_if;
struct pcan_usb_pro_device *pdev =
container_of(dev, struct pcan_usb_pro_device, dev);
+ struct pcan_usb_pro_interface *usb_if = NULL;
+ struct pcan_usb_pro_fwinfo *fi = NULL;
+ struct pcan_usb_pro_blinfo *bi = NULL;
+ int err;
/* do this for 1st channel only */
if (!dev->prev_siblings) {
- struct pcan_usb_pro_fwinfo fi;
- struct pcan_usb_pro_blinfo bi;
- int err;
-
/* allocate netdevices common structure attached to first one */
usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface),
GFP_KERNEL);
- if (!usb_if)
- return -ENOMEM;
+ fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL);
+ bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL);
+ if (!usb_if || !fi || !bi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
/* number of ts msgs to ignore before taking one into account */
usb_if->cm_ignore_count = 5;
*/
err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
PCAN_USBPRO_INFO_FW,
- &fi, sizeof(fi));
+ fi, sizeof(*fi));
if (err) {
- kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
pcan_usb_pro.name, err);
- return err;
+ goto err_out;
}
err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
PCAN_USBPRO_INFO_BL,
- &bi, sizeof(bi));
+ bi, sizeof(*bi));
if (err) {
- kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s bootloader info (err %d)\n",
pcan_usb_pro.name, err);
- return err;
+ goto err_out;
}
+ /* tell the device the can driver is running */
+ err = pcan_usb_pro_drv_loaded(dev, 1);
+ if (err)
+ goto err_out;
+
dev_info(dev->netdev->dev.parent,
"PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n",
pcan_usb_pro.name,
- bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo,
+ bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo,
pcan_usb_pro.ctrl_count);
-
- /* tell the device the can driver is running */
- pcan_usb_pro_drv_loaded(dev, 1);
} else {
usb_if = pcan_usb_pro_dev_if(dev->prev_siblings);
}
pcan_usb_pro_set_led(dev, 0, 1);
return 0;
+
+ err_out:
+ kfree(bi);
+ kfree(fi);
+ kfree(usb_if);
+
+ return err;
}
static void pcan_usb_pro_exit(struct peak_usb_device *dev)
/* Vendor Request value for XXX_FCT */
#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */
+#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16
/* PCAN_USBPRO_INFO_BL vendor request record type */
struct __packed pcan_usb_pro_blinfo {
pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
open:1,
medialock:1,
- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
large_frames:1, /* accept large frames */
handling_irq:1; /* private in_irq indicator */
/* {get|set}_wol operations are already serialized by rtnl.
if (rc < 0)
goto out;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc < 0) {
+ pci_disable_device(pdev);
+ goto out;
+ }
+
unit = vortex_cards_found;
if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr) {
+ pci_release_regions(pdev);
pci_disable_device(pdev);
rc = -ENOMEM;
goto out;
ent->driver_data, unit);
if (rc < 0) {
pci_iounmap(pdev, ioaddr);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
goto out;
}
/* PCI-only startup logic */
if (pdev) {
- /* EISA resources already marked, so only PCI needs to do this here */
- /* Ignore return value, because Cardbus drivers already allocate for us */
- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
- vp->must_free_region = 1;
-
/* enable bus-mastering if necessary */
if (vci->flags & PCI_USES_MASTER)
pci_set_master(pdev);
&vp->rx_ring_dma);
retval = -ENOMEM;
if (!vp->rx_ring)
- goto free_region;
+ goto free_device;
vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring,
vp->rx_ring_dma);
-free_region:
- if (vp->must_free_region)
- release_region(dev->base_addr, vci->io_size);
+free_device:
free_netdev(dev);
pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring,
vp->rx_ring_dma);
- if (vp->must_free_region)
- release_region(dev->base_addr, vp->io_size);
+
+ pci_release_regions(pdev);
+
free_netdev(dev);
}
rc |= XMIT_CSUM_TCP;
if (skb_is_gso_v6(skb)) {
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
if (rc & XMIT_CSUM_ENC)
rc |= XMIT_GSO_ENC_V6;
} else if (skb_is_gso(skb)) {
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
if (rc & XMIT_CSUM_ENC)
rc |= XMIT_GSO_ENC_V4;
}
*/
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
+ struct eth_tx_start_bd *tx_start_bd,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
- } else
+ /* GSO on 57710/57711 needs FW to calculate IP checksum */
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+ } else {
pbd->tcp_pseudo_csum =
bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
+ }
pbd->global_data |=
cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
{
u16 hlen_w = 0;
u8 outerip_off, outerip_len = 0;
+
/* from outer IP to transport */
hlen_w = (skb_inner_transport_header(skb) -
skb_network_header(skb)) >> 1;
/* transport len */
- if (xmit_type & XMIT_CSUM_TCP)
- hlen_w += inner_tcp_hdrlen(skb) >> 1;
- else
- hlen_w += sizeof(struct udphdr) >> 1;
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
pbd2->fw_ip_hdr_to_payload_w = hlen_w;
- if (xmit_type & XMIT_CSUM_ENC_V4) {
+ /* outer IP header info */
+ if (xmit_type & XMIT_CSUM_V4) {
struct iphdr *iph = ip_hdr(skb);
pbd2->fw_ip_csum_wo_len_flags_frag =
bswab16(csum_fold((~iph->check) -
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
else
- bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+ bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
}
/* Set the PBD's parsing_data field if not zero
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 131
+#define TG3_MIN_NUM 132
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "April 09, 2013"
+#define DRV_MODULE_RELDATE "May 21, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
return 0;
}
+static bool tg3_phy_power_bug(struct tg3 *tp)
+{
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5700:
+ case ASIC_REV_5704:
+ return true;
+ case ASIC_REV_5780:
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ return true;
+ return false;
+ case ASIC_REV_5717:
+ if (!tp->pci_fn)
+ return true;
+ return false;
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tp->pci_fn)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
- tg3_asic_rev(tp) == ASIC_REV_5704 ||
- (tg3_asic_rev(tp) == ASIC_REV_5780 &&
- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (tg3_asic_rev(tp) == ASIC_REV_5717 &&
- !tp->pci_fn))
+ if (tg3_phy_power_bug(tp))
return;
if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
return (base > 0xffffdcc0) && (base + len + 8 < base);
}
+/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
+ * of any 4GB boundaries: 4G, 8G, etc
+ */
+static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ u32 len, u32 mss)
+{
+ if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
+ u32 base = (u32) mapping & 0xffffffff;
+
+ return ((base + len + (mss & 0x3fff)) < base);
+ }
+ return 0;
+}
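
Both helpers lean on 32-bit wraparound arithmetic: the existing test asks whether base + len + 8 wraps past 0xffffffff, and the new TSO variant widens that window by the MSS so a hardware-split segment cannot straddle a 4GB boundary either. A short sketch of the arithmetic with explicit 32-bit types; the sample addresses and lengths are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* wraps if base + len + 8 crosses a 4GB boundary (unsigned overflow) */
    static int overflow_4g(uint32_t base, uint32_t len)
    {
            return (base > 0xffffdcc0u) && (base + len + 8 < base);
    }

    /* TSO variant: also leave room for one MSS worth of payload */
    static int overflow_4g_tso(uint32_t base, uint32_t len, uint32_t mss)
    {
            return mss && ((base + len + (mss & 0x3fff)) < base);
    }

    int main(void)
    {
            uint32_t base = 0xffffff00u;

            printf("plain: %d\n", overflow_4g(base, 0x200));            /* 1: wraps */
            printf("tso:   %d\n", overflow_4g_tso(base, 0x40, 1460));   /* 1: wraps */
            printf("safe:  %d\n", overflow_4g_tso(0x1000u, 0x40, 1460));/* 0 */
            return 0;
    }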
+
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
int len)
if (tg3_4g_overflow_test(map, len))
hwbug = true;
+ if (tg3_4g_tso_overflow_test(tp, map, len, mss))
+ hwbug = true;
+
if (tg3_40bit_overflow_test(tp, map, len))
hwbug = true;
tg3_halt_cpu(tp, RX_CPU_BASE);
}
+ err = tg3_poll_fw(tp);
+ if (err)
+ return err;
+
tw32(GRC_MODE, tp->grc_mode);
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
- err = tg3_poll_fw(tp);
- if (err)
- return err;
-
tg3_mdio_start(tp);
if (tg3_flag(tp, PCI_EXPRESS) &&
}
}
+static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
+{
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
+ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
+ else
+ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
+}
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (tg3_asic_rev(tp) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
}
if (i < TG3_NUM_RDMA_CHANNELS) {
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ val |= tg3_lso_rd_dma_workaround_bit(tp);
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
- tg3_flag_set(tp, 5719_RDMA_BUG);
+ tg3_flag_set(tp, 5719_5720_RDMA_BUG);
}
}
TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
- if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
+ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
(sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
u32 val;
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ val &= ~tg3_lso_rd_dma_workaround_bit(tp);
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
- tg3_flag_clear(tp, 5719_RDMA_BUG);
+ tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
}
TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
-#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000
/* 0x4914 --> 0x4be0 unused */
#define TG3_NUM_RDMA_CHANNELS 4
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_TX_TSTAMP_EN,
TG3_FLAG_4K_FIFO_LIMIT,
- TG3_FLAG_5719_RDMA_BUG,
+ TG3_FLAG_5719_5720_RDMA_BUG,
TG3_FLAG_RESET_TASK_PENDING,
TG3_FLAG_PTP_CAPABLE,
TG3_FLAG_5705_PLUS,
sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
-
- if (!bnad->work_q)
+ if (!bnad->work_q) {
+ iounmap(bnad->bar0);
return -ENOMEM;
+ }
return 0;
}
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && HAS_DMA
select NET_CORE
select MACB
---help---
config MACB
tristate "Cadence MACB/GEM support"
+ depends on HAS_DMA
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
- macb_writel(bp, ISR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
}
}
+/*
+ * Configure peripheral capabilities according to integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+ if (macb_is_gem(bp)) {
+ if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+ bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ }
+}
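
macb_configure_caps() reads the DCFG1 design-configuration register and tests the IRQCOR bit through the driver's OFFSET/SIZE macro scheme; when that bit reads as zero the hunk sets MACB_CAPS_ISR_CLEAR_ON_WRITE. The general extraction pattern is value >> OFFSET masked to SIZE bits. A user-space sketch of that pattern; the macro name GEM_BF_EXT and the register value are invented here, only the OFFSET/SIZE pair comes from the header changes below:

    #include <stdint.h>
    #include <stdio.h>

    /* OFFSET/SIZE pair in the style of the macb/gem header */
    #define GEM_IRQCOR_OFFSET       23
    #define GEM_IRQCOR_SIZE         1

    /* extract <size> bits of <reg> starting at <offset> */
    #define GEM_BF_EXT(offset, size, reg) \
            (((reg) >> (offset)) & ((1u << (size)) - 1))

    int main(void)
    {
            uint32_t dcfg1 = 1u << 23;      /* pretend the IRQCOR bit is set */

            if (GEM_BF_EXT(GEM_IRQCOR_OFFSET, GEM_IRQCOR_SIZE, dcfg1) == 0)
                    printf("bit clear: ISR must be cleared by writing\n");
            else
                    printf("bit set: leave the capability unset\n");
            return 0;
    }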
+
static void macb_init_hw(struct macb *bp)
{
u32 config;
bp->duplex = DUPLEX_HALF;
macb_configure_dma(bp);
+ macb_configure_caps(bp);
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
#define MACB_REV_SIZE 16
/* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET 23
+#define GEM_IRQCOR_SIZE 1
#define GEM_DBWDEF_OFFSET 25
#define GEM_DBWDEF_SIZE 3
#define MACB_MAN_READ 2
#define MACB_MAN_CODE 2
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1
+
/* Bit manipulation macros */
#define MACB_BIT(name) \
(1 << MACB_##name##_OFFSET)
unsigned int speed;
unsigned int duplex;
+ u32 caps;
+
phy_interface_t phy_interface;
/* AT91RM9200 transmit */
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
select CRC32
help
This is the driver for the XGMAC Ethernet IP block found on Calxeda
u8 ipv6;
u8 vtm;
u8 pkt_type;
+ u8 ip_frag;
};
struct be_rx_obj {
resource_error = lancer_provisioning_error(adapter);
if (resource_error)
- return -1;
+ return -EAGAIN;
status = lancer_wait_ready(adapter);
if (!status) {
* when PF provisions resources.
*/
resource_error = lancer_provisioning_error(adapter);
- if (status == -1 && !resource_error)
- adapter->eeh_error = true;
+ if (resource_error)
+ status = -EAGAIN;
return status;
}
for (i = 0; i < desc_count; i++) {
desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
if (((void *)desc + desc->desc_len) >
- (void *)(buf + max_buf_size)) {
- desc = NULL;
- break;
- }
+ (void *)(buf + max_buf_size))
+ return NULL;
if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
- break;
+ return desc;
desc = (void *)desc + desc->desc_len;
}
- if (!desc || i == MAX_RESOURCE_DESC)
- return NULL;
-
- return desc;
+ return NULL;
}
/* Uses Mbox */
u8 ip_version; /* dword 1 */
u8 macdst[6]; /* dword 1 */
u8 vtp; /* dword 1 */
- u8 rsvd0; /* dword 1 */
+ u8 ip_frag; /* dword 1 */
u8 fragndx[10]; /* dword 1 */
u8 ct[2]; /* dword 1 */
u8 sw; /* dword 1 */
if (unlikely(!skb))
return skb;
- if (vlan_tx_tag_present(skb)) {
+ if (vlan_tx_tag_present(skb))
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (skb)
- skb->vlan_tci = 0;
- }
-
- if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
- if (!vlan_tag)
- vlan_tag = adapter->pvid;
- if (skip_hw_vlan)
- *skip_hw_vlan = true;
- }
+ else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
+ vlan_tag = adapter->pvid;
if (vlan_tag) {
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (unlikely(!skb))
return skb;
-
skb->vlan_tci = 0;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
}
/* Insert the outer VLAN, if any */
compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
+ rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ ip_frag, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
else
be_parse_rx_compl_v0(compl, rxcp);
+ if (rxcp->ip_frag)
+ rxcp->l4_csum = 0;
+
if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
- return (rxcp->tcpf && !rxcp->err) ? true : false;
+ return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
static int lancer_recover_func(struct be_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
int status;
status = lancer_test_and_set_rdy_state(adapter);
be_clear(adapter);
- adapter->hw_error = false;
- adapter->fw_timeout = false;
+ be_clear_all_error(adapter);
status = be_setup(adapter);
if (status)
goto err;
}
- dev_err(&adapter->pdev->dev,
- "Adapter SLIPORT recovery succeeded\n");
+ dev_err(dev, "Error recovery successful\n");
return 0;
err:
- if (adapter->eeh_error)
- dev_err(&adapter->pdev->dev,
- "Adapter SLIPORT recovery failed\n");
+ if (status == -EAGAIN)
+ dev_err(dev, "Waiting for resource provisioning\n");
+ else
+ dev_err(dev, "Error recovery failed\n");
return status;
}
{
struct be_adapter *adapter =
container_of(work, struct be_adapter, func_recovery_work.work);
- int status;
+ int status = 0;
be_detect_error(adapter);
if (adapter->hw_error && lancer_chip(adapter)) {
- if (adapter->eeh_error)
- goto out;
-
rtnl_lock();
netif_device_detach(adapter->netdev);
rtnl_unlock();
status = lancer_recover_func(adapter);
-
if (!status)
netif_device_attach(adapter->netdev);
}
-out:
- schedule_delayed_work(&adapter->func_recovery_work,
- msecs_to_jiffies(1000));
+ /* In Lancer, for all errors other than provisioning error (-EAGAIN),
+ * no need to attempt further recovery.
+ */
+ if (!status || status == -EAGAIN)
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
}
static void be_worker(struct work_struct *work)
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- adapter->eeh_error = true;
-
- cancel_delayed_work_sync(&adapter->func_recovery_work);
+ if (!adapter->eeh_error) {
+ adapter->eeh_error = true;
- rtnl_lock();
- netif_device_detach(netdev);
- rtnl_unlock();
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
- if (netif_running(netdev)) {
rtnl_lock();
- be_close(netdev);
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ be_close(netdev);
rtnl_unlock();
+
+ be_clear(adapter);
}
- be_clear(adapter);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
int status;
dev_info(&adapter->pdev->dev, "EEH reset\n");
- be_clear_all_error(adapter);
status = pci_enable_device(pdev);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
pci_cleanup_aer_uncorrect_error_status(pdev);
+ be_clear_all_error(adapter);
return PCI_ERS_RESULT_RECOVERED;
}
#define FEC_QUIRK_HAS_GBIT (1 << 3)
/* Controller has extend desc buffer */
#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM (1 << 5)
static struct platform_device_id fec_devtype[] = {
{
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX,
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
}, {
- .name = "mvf-fec",
+ .name = "mvf600-fec",
.driver_data = FEC_QUIRK_ENET_MAC,
}, {
/* sentinel */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
- MVF_FEC,
+ MVF600_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
- { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
+ { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
netif_device_detach(ndev);
napi_disable(&fep->napi);
netif_stop_queue(ndev);
- netif_tx_lock(ndev);
+ netif_tx_lock_bh(ndev);
}
/* Whack a reset. We should wait for this. */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
if (netif_running(ndev)) {
- netif_device_attach(ndev);
- napi_enable(&fep->napi);
+ netif_tx_unlock_bh(ndev);
netif_wake_queue(ndev);
- netif_tx_unlock(ndev);
+ napi_enable(&fep->napi);
+ netif_device_attach(ndev);
}
}
iap = &tmpaddr[0];
}
+ /*
+ * 5) random mac address
+ */
+ if (!is_valid_ether_addr(iap)) {
+ /* Report it and use a random ethernet address instead */
+ netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ eth_hw_addr_random(ndev);
+ netdev_info(ndev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ return;
+ }
+
memcpy(ndev->dev_addr, iap, ETH_ALEN);
/* Adjust MAC if using macaddr */
static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *cbd_base;
/* Allocate memory for buffer descriptors. */
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
- /* enable hw accelerator */
- ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
- fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM);
+ ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ }
fec_restart(ndev, 0);
return 0;
no_clock:
+ iounmap(etsects->regs);
no_ioremap:
release_resource(etsects->rsrc);
no_resource:
}
#ifdef CONFIG_PPC_DCR_NATIVE
- /* Enable internal clock source */
- if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_ETH_CFG,
- 0, SDR0_ETH_CFG_ECS << dev->cell_index);
+ /*
+ * PPC460EX/GT Embedded Processor Advanced User's Manual
+ * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+ * Note: The PHY must provide a TX Clk in order to perform a soft reset
+ * of the EMAC. If none is present, select the internal clock
+ * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+ * After a soft reset, select the external clock.
+ */
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+ if (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff) {
+ /* No PHY: select internal loop clock before reset */
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, SDR0_ETH_CFG_ECS << dev->cell_index);
+ } else {
+ /* PHY present: select external clock before reset */
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+ }
+ }
#endif
out_be32(&p->mr0, EMAC_MR0_SRST);
--n;
#ifdef CONFIG_PPC_DCR_NATIVE
- /* Enable external clock source */
- if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_ETH_CFG,
- SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+ if (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff) {
+ /* No PHY: restore external clock source after reset */
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+ }
+ }
#endif
if (n) {
/* TFD data structure masks. */
/* TFDList, TFC */
-#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
-#define IPG_TFC_FRAMEID 0x000000000000FFFF
-#define IPG_TFC_WORDALIGN 0x0000000000030000
-#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
-#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
-#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
-#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
-#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
-#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
-#define IPG_TFC_TXINDICATE 0x0000000000400000
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
-#define IPG_TFC_FRAGCOUNT 0x000000000F000000
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
-#define IPG_TFC_TFDDONE 0x0000000080000000
-#define IPG_TFC_VID 0x00000FFF00000000
-#define IPG_TFC_CFI 0x0000100000000000
-#define IPG_TFC_USERPRIORITY 0x0000E00000000000
+#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
+#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
+#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
+#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
+#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
+#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
+#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
+#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
+#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
+#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
+#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
+#define IPG_TFC_TFDDONE 0x0000000080000000ULL
+#define IPG_TFC_VID 0x00000FFF00000000ULL
+#define IPG_TFC_CFI 0x0000100000000000ULL
+#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
/* TFDList, FragInfo */
-#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
-#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
+#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
/* RFD data structure masks. */
/* RFDList, RFS */
-#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
-#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
-#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
-#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
-#define IPG_RFS_RXFCSERROR 0x0000000000080000
-#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
-#define IPG_RFS_VLANDETECTED 0x0000000000400000
-#define IPG_RFS_TCPDETECTED 0x0000000000800000
-#define IPG_RFS_TCPERROR 0x0000000001000000
-#define IPG_RFS_UDPDETECTED 0x0000000002000000
-#define IPG_RFS_UDPERROR 0x0000000004000000
-#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
-#define IPG_RFS_FRAMESTART 0x0000000020000000
-#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
-#define IPG_RFS_TCI 0x0000FFFF00000000
+#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
+#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
+#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
+#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
+#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
+#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
+#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
+#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
+#define IPG_RFS_TCPERROR 0x0000000001000000ULL
+#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
+#define IPG_RFS_UDPERROR 0x0000000004000000ULL
+#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
+#define IPG_RFS_IPERROR 0x0000000010000000ULL
+#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
+#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
+#define IPG_RFS_RFDDONE 0x0000000080000000ULL
+#define IPG_RFS_TCI 0x0000FFFF00000000ULL
/* RFDList, FragInfo */
-#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
-#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
+#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
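
Adding the ULL suffix makes every one of these 64-bit masks explicitly unsigned long long instead of leaving the type to the compiler's constant-typing rules; presumably this quiets the "constant is so big" style warnings that sparse and some gcc configurations emit for masks such as IPG_TFI_RSVD_MASK whose top bits are set. A short sketch showing a suffixed mask applied to a 64-bit descriptor word (the sample status word is made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPG_RFS_RXFRAMELEN      0x000000000000FFFFULL
    #define IPG_RFS_RFDDONE         0x0000000080000000ULL
    #define IPG_RFS_TCI             0x0000FFFF00000000ULL

    int main(void)
    {
            /* a made-up receive-frame-status word */
            uint64_t rfs = 0x0000123480000400ULL;

            printf("len  = %" PRIu64 "\n", rfs & IPG_RFS_RXFRAMELEN);       /* 1024 */
            printf("done = %d\n", (rfs & IPG_RFS_RFDDONE) != 0);            /* 1 */
            printf("tci  = 0x%" PRIx64 "\n", (rfs & IPG_RFS_TCI) >> 32);    /* 0x1234 */
            return 0;
    }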
/* I/O Register masks. */
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
int reclaimed;
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock_bh(nq);
reclaimed = 0;
while (reclaimed < budget && txq->tx_desc_count > 0) {
dev_kfree_skb(skb);
}
- __netif_tx_unlock(nq);
+ __netif_tx_unlock_bh(nq);
if (reclaimed < budget)
mp->work_tx &= ~(1 << txq->index);
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
init_timer(&mp->rx_oom);
mp->rx_oom.data = (unsigned long)mp;
* FLR process. The only non-zero result in the RESET command
* is MLX4_DELAY_RESET_SLAVE*/
if ((MLX4_COMM_CMD_RESET == cmd)) {
- mlx4_warn(dev, "Got slave FLRed from Communication"
- " channel (ret:0x%x)\n", ret_from_pending);
err = MLX4_DELAY_RESET_SLAVE;
} else {
mlx4_warn(dev, "Communication channel timed out\n");
priv->last_moder_time[ring] = moder_time;
cq = &priv->rx_cq[ring];
cq->moder_time = moder_time;
+ cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
en_err(priv, "Failed modifying moderation for cq:%d\n",
struct mlx4_en_priv *priv;
int i;
int err;
+ u64 mac_u64;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
- priv->port, dev->dev_addr);
- err = -EINVAL;
- goto out;
+ if (mlx4_is_slave(priv->mdev->dev)) {
+ eth_hw_addr_random(dev);
+ en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
+ mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+ mdev->dev->caps.def_mac[priv->port] = mac_u64;
+ } else {
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
+ priv->port, dev->dev_addr);
+ err = -EINVAL;
+ goto out;
+ }
}
memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
if (user_prio >= 0) {
context->pri_path.sched_queue |= user_prio << 3;
- context->pri_path.feup = 1 << 6;
+ context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
context->pri_path.counter_index = 0xff;
context->cqn_send = cpu_to_be32(cqn);
[2] = "RSS XOR Hash Function support",
[3] = "Device manage flow steering support",
[4] = "Automatic MAC reassignment support",
- [5] = "Time stamping support"
+ [5] = "Time stamping support",
+ [6] = "VST (control vlan insertion/stripping) support",
+ [7] = "FSM (MAC anti-spoofing) support"
};
int i;
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
- /* set slave default_mac address */
- MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
- def_mac += slave << 8;
/* if config MAC in DB use it */
if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
+ else {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
+ }
+
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 dma = (u64) priv->mfunc.vhcr_dma;
- int num_of_reset_retries = NUM_OF_RESET_RETRIES;
int ret_from_reset = 0;
u32 slave_read;
u32 cmd_channel_ver;
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
- msleep(SLEEP_TIME_IN_RESET);
- while (ret_from_reset && num_of_reset_retries) {
- mlx4_warn(dev, "slave is currently in the"
- "middle of FLR. retrying..."
- "(try num:%d)\n",
- (NUM_OF_RESET_RETRIES -
- num_of_reset_retries + 1));
- ret_from_reset =
- mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
- 0, MLX4_COMM_TIME);
- num_of_reset_retries = num_of_reset_retries - 1;
- }
+ mlx4_warn(dev, "slave is currently in the "
+ "middle of FLR. Deferring probe.\n");
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
+ return -EPROBE_DEFER;
} else
goto err;
}
} else {
err = mlx4_init_slave(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize slave\n");
+ if (err != -EPROBE_DEFER)
+ mlx4_err(dev, "Failed to initialize slave\n");
return err;
}
if (MLX4_QP_ST_RC == qp_type)
return -EINVAL;
+ /* force strip vlan by clear vsd */
+ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ if (0 != vp_oper->state.default_vlan) {
+ qpc->pri_path.vlan_control =
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* priority tagged */
+ qpc->pri_path.vlan_control =
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
+ }
+
+ qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
- qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+ qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
- mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
- be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
- (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
- vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
- (int)(qpc->pri_path.fl));
}
if (vp_oper->state.spoofchk) {
- qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
+ qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
- mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
- be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
- (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
- vp_oper->mac_idx);
}
return 0;
}
u16 port_type;
u16 board_type;
+ u16 supported_type;
u16 link_speed;
u16 link_duplex;
#define QLCNIC_FW_HANG 0x4000
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_TX_INTR_SHARED 0x10000
+#define QLCNIC_APP_CHANGED_FLAGS 0x20000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter) \
+ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
spinlock_t rx_mac_learn_lock;
u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
+ u32 offload_flags;
const struct firmware *fw;
};
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_schedule_multi(struct net_device *);
void qlcnic_vf_add_mc_list(struct net_device *, u16);
return 1;
}
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
{
u32 data;
- unsigned long wait_time = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
/* wait for mailbox completion */
do {
data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
- if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+ if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
data = QLCNIC_RCODE_TIMEOUT;
break;
}
u16 opcode;
u8 mbx_err_code;
unsigned long flags;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
opcode = LSW(cmd->req.arg[0]);
if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
/* Signal FW about the impending command */
QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
poll:
- rsp = qlcnic_83xx_mbx_poll(adapter);
+ rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
if (rsp != QLCNIC_RCODE_TIMEOUT) {
/* Get the FW response data */
fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val)
- goto poll;
+ goto poll;
}
mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
return err;
}
-static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+ int num_sds_ring)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_rds_ring *rds_ring;
+ u16 adapter_state = adapter->is_up;
u8 ring;
int ret;
ret = qlcnic_fw_create_ctx(adapter);
if (ret) {
qlcnic_detach(adapter);
+ if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+ adapter->max_sds_rings = num_sds_ring;
+ qlcnic_attach(adapter);
+ }
netif_device_attach(netdev);
return ret;
}
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
- ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+ max_sds_rings);
if (ret)
goto fail_diag_alloc;
break;
}
config = cmd.rsp.arg[3];
+ if (QLC_83XX_SFP_PRESENT(config)) {
+ switch (ahw->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ahw->supported_type = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ahw->supported_type = PORT_TP;
+ break;
+ default:
+ ahw->supported_type = PORT_OTHER;
+ }
+ }
if (config & 1)
err = 1;
}
return config;
}
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
u32 config = 0;
int status = 0;
ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
/* hard code until there is a way to get it from flash */
ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->duplex = ahw->link_duplex;
+ ecmd->autoneg = ahw->link_autoneg;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+ }
+
+ switch (ahw->supported_type) {
+ case PORT_FIBRE:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case PORT_TP:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_OTHER;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ ecmd->phy_address = ahw->physical_port;
return status;
}
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
- ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+ max_sds_rings);
if (ret)
goto fail_diag_irq;
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
struct ethtool_pauseparam *);
int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
#endif
clear_bit(__QLCNIC_RESETTING, &adapter->state);
dev_err(&adapter->pdev->dev, "%s:\n", __func__);
- adapter->netdev->trans_start = jiffies;
-
return 0;
}
}
done:
netif_device_attach(netdev);
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
}
static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
- adapter->ahw->idc.quiesce_req = 0;
- adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
- adapter->ahw->idc.err_code = 0;
- adapter->ahw->idc.collect_dump = 0;
+
+ ahw->idc.quiesce_req = 0;
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->idc.err_code = 0;
+ ahw->idc.collect_dump = 0;
+ ahw->reset_context = 0;
+ adapter->tx_timeo_cnt = 0;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
/**
/* Check for soft reset request */
if (ahw->reset_context &&
!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ adapter->ahw->reset_context = 0;
qlcnic_83xx_idc_tx_soft_reset(adapter);
return ret;
}
static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
{
dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->ahw->idc.err_code = -EIO;
return 0;
"ctx_lro_pkt_cnt",
"ctx_ip_csum_error",
"ctx_rx_pkts_wo_ctx",
- "ctx_rx_pkts_dropped_wo_sts",
+ "ctx_rx_pkts_drop_wo_sds_on_card",
+ "ctx_rx_pkts_drop_wo_sds_on_host",
"ctx_rx_osized_pkts",
"ctx_rx_pkts_dropped_wo_rds",
"ctx_rx_unexpected_mcast_pkts",
"ctx_invalid_mac_address",
- "ctx_rx_rds_ring_prim_attemoted",
+ "ctx_rx_rds_ring_prim_attempted",
"ctx_rx_rds_ring_prim_success",
"ctx_num_lro_flows_added",
"ctx_num_lro_flows_removed",
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
int check_sfp_module = 0;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
- if (qlcnic_83xx_check(adapter))
- qlcnic_83xx_get_settings(adapter);
- else
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
}
- if (netif_running(dev) && adapter->ahw->has_link_events) {
- if (qlcnic_82xx_check(adapter)) {
- reg = QLCRD32(adapter,
- P3P_LINK_SPEED_REG(pcifn));
- speed = P3P_LINK_SPEED_VAL(pcifn, reg);
- ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
- }
- ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
- ecmd->autoneg = adapter->ahw->link_autoneg;
- ecmd->duplex = adapter->ahw->link_duplex;
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->autoneg = ahw->link_autoneg;
+ ecmd->duplex = ahw->link_duplex;
goto skip;
}
case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
- check_sfp_module = netif_running(dev) &&
- adapter->ahw->has_link_events;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
- check_sfp_module = netif_running(dev) &&
- adapter->ahw->has_link_events;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->port = PORT_TP;
}
break;
- case QLCNIC_BRDTYPE_83XX_10G:
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
- ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
- ecmd->port = PORT_FIBRE;
- check_sfp_module = netif_running(dev) && ahw->has_link_events;
- break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
adapter->ahw->board_type);
return rc;
}
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+ netdev_features_t features)
+{
+ u32 offload_flags = adapter->offload_flags;
+
+ if (offload_flags & BIT_0) {
+ features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ adapter->rx_csum = 1;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ if (!(offload_flags & BIT_1))
+ features &= ~NETIF_F_TSO;
+ else
+ features |= NETIF_F_TSO;
+
+ if (!(offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO6;
+ else
+ features |= NETIF_F_TSO6;
+ }
+ } else {
+ features &= ~(NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ adapter->rx_csum = 0;
+ }
+
+ return features;
+}
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed;
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
- qlcnic_82xx_check(adapter)) {
- netdev_features_t changed = features ^ netdev->features;
- features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+ features = qlcnic_process_flags(adapter, features);
+ } else {
+ changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+ }
}
if (!(features & NETIF_F_RXCSUM))
#define QLCNIC_SET_OWNER 1
#define QLCNIC_CLR_OWNER 0
-#define QLCNIC_MBX_TIMEOUT 10000
+#define QLCNIC_MBX_TIMEOUT 5000
#define QLCNIC_MBX_RSP_OK 1
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
"Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
int qlcnic_use_msi = 1;
-MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);
int qlcnic_use_msi_x = 1;
-MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
int qlcnic_auto_fw_reset = 1;
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
int qlcnic_load_fw_file;
-MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
-MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
-static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
- struct qlcnic_esw_func_cfg *);
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
-#define QLCNIC_IS_TSO_CAPABLE(adapter) \
- ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
-
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
return 0;
}
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head;
+
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ return;
+ }
+ }
+}
+
static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
+ if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ return 0;
+
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
qlcnic_napi_disable(adapter);
}
+ qlcnic_delete_adapter_mac(adapter);
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
qlcnic_set_multi(adapter->netdev);
if (!esw_cfg->promisc_mode)
adapter->flags |= QLCNIC_PROMISC_DISABLED;
-
- qlcnic_set_netdev_features(adapter, esw_cfg);
}
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
return -EIO;
qlcnic_set_vlan_config(adapter, &esw_cfg);
qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+ qlcnic_set_netdev_features(adapter, &esw_cfg);
return 0;
}
-static void
-qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- unsigned long features, vlan_features;
if (qlcnic_83xx_check(adapter))
return;
- features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
- vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
-
- if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
- features |= (NETIF_F_TSO | NETIF_F_TSO6);
- vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
- }
-
- if (netdev->features & NETIF_F_LRO)
- features |= NETIF_F_LRO;
-
- if (esw_cfg->offload_flags & BIT_0) {
- netdev->features |= features;
- adapter->rx_csum = 1;
- if (!(esw_cfg->offload_flags & BIT_1)) {
- netdev->features &= ~NETIF_F_TSO;
- features &= ~NETIF_F_TSO;
- }
- if (!(esw_cfg->offload_flags & BIT_2)) {
- netdev->features &= ~NETIF_F_TSO6;
- features &= ~NETIF_F_TSO6;
- }
- } else {
- netdev->features &= ~features;
- features &= ~features;
- adapter->rx_csum = 0;
- }
-
- netdev->vlan_features = (features & vlan_features);
+ adapter->offload_flags = esw_cfg->offload_flags;
+ adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+ netdev_update_features(netdev);
+ adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
}
static int
pci_enable_pcie_error_reporting(pdev);
ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
- if (!ahw)
+ if (!ahw) {
+ err = -ENOMEM;
goto err_out_free_res;
+ }
switch (ent->device) {
case PCI_DEVICE_ID_QLOGIC_QLE824X:
adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
if (adapter->qlcnic_wq == NULL) {
+ err = -ENOMEM;
dev_err(&pdev->dev, "Failed to create workqueue\n");
goto err_out_free_netdev;
}
goto err_out_disable_msi;
}
+ err = qlcnic_get_act_pci_func(adapter);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
goto err_out_disable_mbx_intr;
break;
}
- if (qlcnic_get_act_pci_func(adapter))
- goto err_out_disable_mbx_intr;
-
if (adapter->drv_mac_learn)
qlcnic_alloc_lb_filters_mem(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
- dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
- if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
- adapter->need_fw_reset = 1;
- else
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+ netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+ if (qlcnic_82xx_check(adapter))
+ adapter->need_fw_reset = 1;
+ else if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ } else {
+ netdev_info(netdev, "Tx timeout, reset adapter context.\n");
adapter->ahw->reset_context = 1;
+ }
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
+ if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
qlcnic_reset_hw_context(adapter);
- adapter->netdev->trans_start = jiffies;
- }
return 0;
}
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
u32 *pay, u8 pci_func, u8 size)
{
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
unsigned long flags;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
u16 opcode;
u8 mbx_err_code;
int i, j;
* assume something is wrong.
*/
poll:
- rsp = qlcnic_83xx_mbx_poll(adapter);
+ rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
if (rsp != QLCNIC_RCODE_TIMEOUT) {
/* Get the FW response data */
fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val)
- goto poll;
+ goto poll;
}
mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
qlcnic_sriov_vf_attach(adapter);
- adapter->netdev->trans_start = jiffies;
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
adapter->fw_fail_cnt = 0;
if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
return -EINVAL;
- if (!(cmd->req.arg[1] & BIT_8))
- return -EINVAL;
-
return 0;
}
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ rtnl_lock();
+ qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+ rtnl_unlock();
break;
case QLCNIC_ADD_VLAN:
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
+ rx_ring->pg_chunk.page = NULL;
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
return -ENOMEM;
curr_idx = 0;
}
+ if (rx_ring->pg_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->pg_chunk.page);
+ rx_ring->pg_chunk.page = NULL;
+ }
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
dev_err(&pdev->dev, "net device registration failed.\n");
ql_release_all(pdev);
pci_disable_device(pdev);
+ free_netdev(ndev);
return err;
}
/* Start up the timer to trigger EEH if
cp->dev->stats.tx_dropped++;
}
}
+ netdev_reset_queue(cp->dev);
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
return -EIO;
}
-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+static bool rtl_skb_pad(struct sk_buff *skb)
+{
+ if (skb_padto(skb, ETH_ZLEN))
+ return false;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ return true;
+}
+
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+{
+ return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+}
+
+static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
+ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+ return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+
if (ip->protocol == IPPROTO_TCP)
opts[offset] |= info->checksum.tcp;
else if (ip->protocol == IPPROTO_UDP)
opts[offset] |= info->checksum.udp;
else
WARN_ON_ONCE(1);
+ } else {
+ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+ return rtl_skb_pad(skb);
}
+ return true;
}
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
- /* 8168evl does not automatically pad to minimum length. */
- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
- skb->len < ETH_ZLEN)) {
- if (skb_padto(skb, ETH_ZLEN))
- goto err_update_stats;
- skb_put(skb, ETH_ZLEN - skb->len);
- }
-
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+ opts[0] = DescOwn;
+
+ if (!rtl8169_tso_csum(tp, skb, opts))
+ goto err_update_stats;
+
len = skb_headlen(skb);
mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
- opts[0] = DescOwn;
-
- rtl8169_tso_csum(tp, skb, opts);
-
frags = rtl8169_xmit_frags(tp, skb, opts);
if (frags < 0)
goto err_dma_1;
if (mdp->cd->tsu) {
struct resource *rtsu;
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rtsu) {
- dev_err(&pdev->dev, "Not found TSU resource\n");
- ret = -ENODEV;
- goto out_release;
- }
mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
if (IS_ERR(mdp->tsu_addr)) {
ret = PTR_ERR(mdp->tsu_addr);
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
- EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+ NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = false;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
- PAGE_SIZE / 2);
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
efx->rx_scatter = true;
efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
efx->rx_buffer_order = 0;
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
-/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
-#define EFX_RX_USR_BUF_SIZE 1824
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT 4
+#endif
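
The constants above are sized so that two scatter buffers, each aligned to a cache line when NET_IP_ALIGN is 0, still share a single 4 KiB page with the per-page state header. A minimal standalone sketch of that arithmetic, assuming a 64-byte L1 cache line, 4 KiB pages and NET_IP_ALIGN == 0 (illustrative values for this sketch, not taken from any particular build):

    #include <stdio.h>

    /* Assumed values for this sketch only. */
    #define L1_CACHE_BYTES       64
    #define PAGE_SIZE            4096
    #define NET_IP_ALIGN         0

    #define EFX_RX_USR_BUF_SIZE  (2048 - 256)    /* 1792 bytes */
    #define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES  /* NET_IP_ALIGN == 0 case */

    #define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* Stride between buffers in a page, as computed in efx_rx_config_page_split() */
            unsigned int step = ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
                                      EFX_RX_BUF_ALIGNMENT);   /* 1792 */

            printf("step %u, two buffers %u, slack left for page state %u\n",
                   step, 2 * step, PAGE_SIZE - 2 * step);      /* 1792 3584 512 */
            return 0;
    }

With these numbers the BUILD_BUG_ON above holds as long as struct efx_rx_page_state fits in the remaining 512 bytes of the page.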
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of page-allocated RX buffers
- *
- * Controls the number of bytes inserted at the start of an RX buffer.
- * This is the equivalent of NET_IP_ALIGN [which controls the alignment
- * of the skb->head for hardware DMA].
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define EFX_PAGE_IP_ALIGN 0
-#else
-#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
-#endif
-
/*
* Alignment of the skb->head which wraps a page-allocated RX buffer
*
* The skb allocated to wrap an rx_buffer can have this alignment. Since
* the data is memcpy'd from the rx_buf, it does not need to be equal to
- * EFX_PAGE_IP_ALIGN.
+ * NET_IP_ALIGN.
*/
#define EFX_PAGE_SKB_ALIGN 2
void efx_rx_config_page_split(struct efx_nic *efx)
{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
- L1_CACHE_BYTES);
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+ EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
efx->rx_page_buf_step);
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
rx_buf->page = page;
- rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->page_offset = page_offset + NET_IP_ALIGN;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
config STMMAC_ETH
tristate "STMicroelectronics 10/100/1000 Ethernet driver"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
select NET_CORE
select MII
select PHYLIB
phy_write(lp->phy_dev, MII_CTRL1000, 0);
/* Advertise only 10 and 100mbps full/half duplex speeds */
- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_CSMA);
/* Restart auto negotiation */
bmcr = phy_read(lp->phy_dev, MII_BMCR);
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
- skb->vlan_tci = packet->vlan_tci;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
}
if (port->passthru)
- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+ vlan = list_first_or_null_rcu(&port->vlans,
+ struct macvlan_dev, list);
else
vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
if (err < 0)
goto upper_dev_unlink;
- list_add_tail(&vlan->list, &port->vlans);
+ list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
{
struct macvlan_dev *vlan = netdev_priv(dev);
- list_del(&vlan->list);
+ list_del_rcu(&vlan->list);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
if (dev == NULL)
return;
+ list_del(&dev->list);
+
ndev = dev->ndev;
unregister_netdev(ndev);
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
- if ((lp & adv & settings[idx].setting))
+ if (!(lp & adv & settings[idx].setting))
goto eee_exit;
if (clk_stop_enable) {
bool incomplete;
int i;
- port = list_first_entry(&team->port_list, struct team_port, list);
+ port = list_first_entry_or_null(&team->port_list,
+ struct team_port, list);
start_again:
err = __send_and_alloc_skb(&skb, team, portid, send_func);
err = team_nl_fill_one_port_get(skb, one_port);
if (err)
goto errout;
- } else {
- list_for_each_entry(port, &team->port_list, list) {
+ } else if (port) {
+ list_for_each_entry_from(port, &team->port_list, list) {
err = team_nl_fill_one_port_get(skb, port);
if (err) {
if (err == -EMSGSIZE) {
else
return -EINVAL;
+ if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
+ !!(tun->flags & TUN_TAP_MQ))
+ return -EINVAL;
+
if (tun_not_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
struct usb_device *udev;
struct tasklet_struct tl;
struct net_device *netdev;
- struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb;
+ struct urb *rx_urb, *tx_urb, *intr_urb;
struct sk_buff *tx_skb, *rx_skb;
struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE];
spinlock_t rx_pool_lock;
struct usb_ctrlrequest dr;
int intr_interval;
- __le16 rx_creg;
u8 *intr_buff;
u8 phy;
};
typedef struct rtl8150 rtl8150_t;
+struct async_req {
+ struct usb_ctrlrequest dr;
+ u16 rx_creg;
+};
+
static const char driver_name [] = "rtl8150";
/*
indx, 0, data, size, 500);
}
-static void ctrl_callback(struct urb *urb)
+static void async_set_reg_cb(struct urb *urb)
{
- rtl8150_t *dev;
+ struct async_req *req = (struct async_req *)urb->context;
int status = urb->status;
- switch (status) {
- case 0:
- break;
- case -EINPROGRESS:
- break;
- case -ENOENT:
- break;
- default:
- if (printk_ratelimit())
- dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
- }
- dev = urb->context;
- clear_bit(RX_REG_SET, &dev->flags);
+ if (status < 0)
+ dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status);
+ kfree(req);
+ usb_free_urb(urb);
}
-static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size)
+static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
{
- int ret;
-
- if (test_bit(RX_REG_SET, &dev->flags))
- return -EAGAIN;
+ int res = -ENOMEM;
+ struct urb *async_urb;
+ struct async_req *req;
- dev->dr.bRequestType = RTL8150_REQT_WRITE;
- dev->dr.bRequest = RTL8150_REQ_SET_REGS;
- dev->dr.wValue = cpu_to_le16(indx);
- dev->dr.wIndex = 0;
- dev->dr.wLength = cpu_to_le16(size);
- dev->ctrl_urb->transfer_buffer_length = size;
- usb_fill_control_urb(dev->ctrl_urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr,
- &dev->rx_creg, size, ctrl_callback, dev);
- if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) {
- if (ret == -ENODEV)
+ req = kmalloc(sizeof(struct async_req), GFP_ATOMIC);
+ if (req == NULL)
+ return res;
+ async_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (async_urb == NULL) {
+ kfree(req);
+ return res;
+ }
+ req->rx_creg = cpu_to_le16(reg);
+ req->dr.bRequestType = RTL8150_REQT_WRITE;
+ req->dr.bRequest = RTL8150_REQ_SET_REGS;
+ req->dr.wIndex = 0;
+ req->dr.wValue = cpu_to_le16(indx);
+ req->dr.wLength = cpu_to_le16(size);
+ usb_fill_control_urb(async_urb, dev->udev,
+ usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr,
+ &req->rx_creg, size, async_set_reg_cb, req);
+ res = usb_submit_urb(async_urb, GFP_ATOMIC);
+ if (res) {
+ if (res == -ENODEV)
netif_device_detach(dev->netdev);
- dev_err(&dev->udev->dev,
- "control request submission failed: %d\n", ret);
- } else
- set_bit(RX_REG_SET, &dev->flags);
-
- return ret;
+ dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
+ }
+ return res;
}
static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
usb_free_urb(dev->tx_urb);
return 0;
}
- dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->ctrl_urb) {
- usb_free_urb(dev->rx_urb);
- usb_free_urb(dev->tx_urb);
- usb_free_urb(dev->intr_urb);
- return 0;
- }
return 1;
}
usb_free_urb(dev->rx_urb);
usb_free_urb(dev->tx_urb);
usb_free_urb(dev->intr_urb);
- usb_free_urb(dev->ctrl_urb);
}
static void unlink_all_urbs(rtl8150_t * dev)
usb_kill_urb(dev->rx_urb);
usb_kill_urb(dev->tx_urb);
usb_kill_urb(dev->intr_urb);
- usb_kill_urb(dev->ctrl_urb);
}
static inline struct sk_buff *pull_skb(rtl8150_t *dev)
}
/* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */
rcr = 0x9e;
- dev->rx_creg = cpu_to_le16(rcr);
tcr = 0xd8;
cr = 0x0c;
if (!(rcr & 0x80))
static void rtl8150_set_multicast(struct net_device *netdev)
{
rtl8150_t *dev = netdev_priv(netdev);
+ u16 rx_creg = 0x9e;
+
netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
- dev->rx_creg |= cpu_to_le16(0x0001);
+ rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
} else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
- dev->rx_creg &= cpu_to_le16(0xfffe);
- dev->rx_creg |= cpu_to_le16(0x0002);
+ rx_creg &= 0xfffe;
+ rx_creg |= 0x0002;
dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
} else {
/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
- dev->rx_creg &= cpu_to_le16(0x00fc);
+ rx_creg &= 0x00fc;
}
- async_set_registers(dev, RCR, 2);
+ async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
netif_wake_queue(netdev);
}
/* usbnet already took usb runtime pm, so have to enable the feature
* for usb interface, otherwise usb_autopm_get_interface may return
- * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+ * failure if RUNTIME_PM is enabled.
*/
if (!driver->supports_autosuspend) {
driver->supports_autosuspend = 1;
#include <linux/slab.h>
#include <linux/cpu.h>
-static int napi_weight = 128;
+static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
static bool csum = true, gso = true;
struct virtnet_info *vi = netdev_priv(dev);
int i;
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- /* Make sure we have some buffers: if oom use wq. */
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ /* Make sure we have some buffers: if oom use wq. */
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(&vi->rq[i]);
}
}
/* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
{
return NULL;
}
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+ const u8 *mac)
+{
+ struct vxlan_fdb *f;
+
+ f = __vxlan_find_mac(vxlan, mac);
+ if (f)
+ f->used = jiffies;
+
+ return f;
+}
+
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
__be32 ip, __be16 port, __u32 vni, __u32 ifindex)
struct vxlan_fdb *f;
int notify = 0;
- f = vxlan_find_mac(vxlan, mac);
+ f = __vxlan_find_mac(vxlan, mac);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
- f->used = jiffies;
if (likely(f->remote.remote_ip == src_ip))
return;
int i;
bool needreset = false;
+ if (!test_bit(ATH_STAT_STARTED, ah->status))
+ return;
+
mutex_lock(&ah->lock);
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
mmiowb();
mutex_unlock(&ah->lock);
+ set_bit(ATH_STAT_STARTED, ah->status);
ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
ath5k_stop_tasklets(ah);
+ clear_bit(ATH_STAT_STARTED, ah->status);
cancel_delayed_work_sync(&ah->tx_complete_work);
if (!ath5k_modparam_no_hw_rfkill_switch)
config ATH9K
tristate "Atheros 802.11n wireless cards support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
select ATH9K_HW
select MAC80211_LEDS
select LEDS_CLASS
{
int i;
- if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
#define AR9300_BASE_ADDR 0x3ff
#define AR9300_BASE_ADDR_512 0x1ff
-#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
-#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
+#define AR9300_OTP_BASE \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
-#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
+#define AR9300_OTP_READ_DATA \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
- if (REG_READ_FIELD(ah, AR_PHY_MODE,
+ if (!AR_SREV_9340(ah) &&
+ REG_READ_FIELD(ah, AR_PHY_MODE,
AR_PHY_MODE_DYNAMIC) == 0x1)
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009d10, 0x01834061},
{0x00009d14, 0x00c00400},
{0x00009d18, 0x00000000},
- {0x00009e08, 0x0078230c},
- {0x00009e24, 0x990bb515},
- {0x00009e28, 0x126f0000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x9907b515},
+ {0x00009e28, 0x126f0600},
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
static const u32 ar9565_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
+ {0x00004050, 0x00300300},
+ {0x0000406c, 0x00100000},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
+ {0x0000a0c0, 0x00bf00a0},
+ {0x0000a0c4, 0x11a011a1},
+ {0x0000a0c8, 0x11be11bf},
+ {0x0000a0cc, 0x11bc11bd},
+ {0x0000a0d0, 0x22632264},
+ {0x0000a0d4, 0x22612262},
+ {0x0000a0d8, 0x227f2260},
+ {0x0000a0dc, 0x4322227e},
+ {0x0000a0e0, 0x43204321},
+ {0x0000a0e4, 0x433e433f},
+ {0x0000a0e8, 0x4462433d},
+ {0x0000a0ec, 0x44604461},
+ {0x0000a0f0, 0x447e447f},
+ {0x0000a0f4, 0x5582447d},
+ {0x0000a0f8, 0x55805581},
+ {0x0000a0fc, 0x559e559f},
+ {0x0000a100, 0x66816682},
+ {0x0000a104, 0x669f6680},
+ {0x0000a108, 0x669d669e},
+ {0x0000a10c, 0x77627763},
+ {0x0000a110, 0x77607761},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
{0x0000a134, 0x00000000},
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
+ {0x0000a140, 0x00bf00a0},
+ {0x0000a144, 0x11a011a1},
+ {0x0000a148, 0x11be11bf},
+ {0x0000a14c, 0x11bc11bd},
+ {0x0000a150, 0x22632264},
+ {0x0000a154, 0x22612262},
+ {0x0000a158, 0x227f2260},
+ {0x0000a15c, 0x4322227e},
+ {0x0000a160, 0x43204321},
+ {0x0000a164, 0x433e433f},
+ {0x0000a168, 0x4462433d},
+ {0x0000a16c, 0x44604461},
+ {0x0000a170, 0x447e447f},
+ {0x0000a174, 0x5582447d},
+ {0x0000a178, 0x55805581},
+ {0x0000a17c, 0x559e559f},
+ {0x0000a180, 0x66816682},
+ {0x0000a184, 0x669f6680},
+ {0x0000a188, 0x669d669e},
+ {0x0000a18c, 0x77e677e7},
+ {0x0000a190, 0x77e477e5},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
- {0x00018c00, 0x18213ede},
+ {0x00018c00, 0x18212ede},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0003780c},
};
{0x0000a180, 0x66816682},
{0x0000a184, 0x669f6680},
{0x0000a188, 0x669d669e},
- {0x0000a18c, 0x77627763},
- {0x0000a190, 0x77607761},
+ {0x0000a18c, 0x77e677e7},
+ {0x0000a190, 0x77e477e5},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
{0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
{0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
{0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
{0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
- {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0},
+ {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1},
+ {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2},
+ {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3},
+ {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4},
+ {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
int tidno;
int baw_head; /* first un-acked tx buffer */
int baw_tail; /* next unused tx buffer slot */
- int sched;
- int paused;
- u8 state;
+ bool sched;
+ bool paused;
+ bool active;
};
struct ath_node {
#endif
};
-#define AGGR_CLEANUP BIT(1)
-#define AGGR_ADDBA_COMPLETE BIT(2)
-#define AGGR_ADDBA_PROGRESS BIT(3)
-
struct ath_tx_control {
struct ath_txq *txq;
struct ath_node *an;
WARN_ON(i != ATH9K_SSTATS_LEN);
}
+void ath9k_deinit_debug(struct ath_softc *sc)
+{
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
+}
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
};
int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_deinit_debug(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
return 0;
}
+static inline void ath9k_deinit_debug(struct ath_softc *sc)
+{
+}
+
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
+ int txbuf_size;
ENABLE_REGWRITE_BUFFER(ah);
* So set the usable tx buf size also to half to
* avoid data/delimiter underruns
*/
- REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
- AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
- } else if (!AR_SREV_9271(ah)) {
- REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
- AR_PCU_TXBUF_CTRL_USABLE_SIZE);
+ txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE;
+ } else if (AR_SREV_9340_13_OR_LATER(ah)) {
+ /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */
+ txbuf_size = AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE;
+ } else {
+ txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE;
}
+ if (!AR_SREV_9271(ah))
+ REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size);
+
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9300_20_OR_LATER(ah))
AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
} else {
tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
- if (tmpReg &
- (AR_INTR_SYNC_LOCAL_TIMEOUT |
- AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ if (AR_SREV_9340(ah))
+ tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT;
+ else
+ tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT;
+
+ if (tmpReg) {
u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
if (!ath_is_world_regd(reg)) {
error = regulatory_hint(hw->wiphy, reg->alpha2);
if (error)
- goto unregister;
+ goto debug_cleanup;
}
ath_init_leds(sc);
return 0;
+debug_cleanup:
+ ath9k_deinit_debug(sc);
unregister:
ieee80211_unregister_hw(hw);
rx_cleanup:
sc->dfs_detector->exit(sc->dfs_detector);
ath9k_eeprom_release(sc);
-
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
- }
}
void ath9k_deinit_device(struct ath_softc *sc)
ath9k_ps_restore(sc);
+ ath9k_deinit_debug(sc);
ieee80211_unregister_hw(hw);
ath_rx_cleanup(sc);
ath9k_deinit_softc(sc);
REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
REG_WRITE(ah, AR_DMISC(q),
AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
else
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
goto work;
- ath9k_set_beacon(sc);
-
if (ah->opmode == NL80211_IFTYPE_STATION &&
test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ } else {
+ ath9k_set_beacon(sc);
}
work:
ath_restart_work(sc);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { };
+ int key;
ath_node_attach(sc, sta, vif);
vif->type != NL80211_IFTYPE_AP_VLAN)
return 0;
- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
+ key = ath_key_config(common, vif, sta, &ps_key);
+ if (key > 0)
+ an->ps_key = key;
return 0;
}
return;
ath_key_delete(common, &ps_key);
+ an->ps_key = 0;
}
static int ath9k_sta_remove(struct ieee80211_hw *hw,
u16 tid, u16 *ssn, u8 buf_size)
{
struct ath_softc *sc = hw->priv;
+ bool flush = false;
int ret = 0;
local_bh_disable();
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ flush = true;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ if (!flush)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
return false;
txtid = ATH_AN_2_TID(an, tidno);
-
- if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
- return true;
- return false;
+ return !txtid->active;
}
#define AR_SREV_REVISION_9485_10 0
#define AR_SREV_REVISION_9485_11 1
#define AR_SREV_VERSION_9340 0x300
+#define AR_SREV_REVISION_9340_10 0
+#define AR_SREV_REVISION_9340_11 1
+#define AR_SREV_REVISION_9340_12 2
+#define AR_SREV_REVISION_9340_13 3
#define AR_SREV_VERSION_9580 0x1C0
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_9340(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340))
+#define AR_SREV_9340_13_OR_LATER(_ah) \
+ (AR_SREV_9340((_ah)) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13))
+
#define AR_SREV_9285E_20(_ah) \
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_MAC_SLEEP_ACCESS),
+ AR9340_INTR_SYNC_LOCAL_TIMEOUT = 0x00000010,
+
AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
};
#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
+#define AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE 0x500
#define AR_PCU_MISC_MODE2 0x8344
#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002
list_add_tail(&ac->list, &txq->axq_acq);
}
-static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_txq *txq = tid->ac->txq;
-
- WARN_ON(!tid->paused);
-
- ath_txq_lock(sc, txq);
- tid->paused = false;
-
- if (skb_queue_empty(&tid->buf_q))
- goto unlock;
-
- ath_tx_queue_tid(txq, tid);
- ath_txq_schedule(sc, txq);
-unlock:
- ath_txq_unlock_complete(sc, txq);
-}
-
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
}
}
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
-
if (sendbar) {
ath_txq_unlock(sc, txq);
ath_send_bar(tid, tid->seq_start);
list_add_tail(&bf->list, &bf_head);
- if (fi->retries)
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tx_info = IEEE80211_SKB_CB(skb);
fi = get_frame_info(skb);
- if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ /*
+ * Outside of the current BlockAck window,
+ * maybe part of a previous session
+ */
+ txfail = 1;
+ } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
/* transmit completion, subframe is
* acked by block ack */
acked_cnt++;
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
- } else if (tid->state & AGGR_CLEANUP) {
- /*
- * cleanup in progress, just fail
- * the un-acked sub-frames
- */
- txfail = 1;
} else if (flush) {
txpending = 1;
} else if (fi->retries < ATH_MAX_SW_RETRIES) {
if (bf_next != NULL || !bf_last->bf_stale)
list_move_tail(&bf->list, &bf_head);
- if (!txpending || (tid->state & AGGR_CLEANUP)) {
+ if (!txpending) {
/*
* complete the acked-ones/xretried ones; update
* block-ack window
ath_txq_lock(sc, txq);
}
- if (tid->state & AGGR_CLEANUP)
- ath_tx_flush_tid(sc, tid);
-
rcu_read_unlock();
if (needreset)
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
{
+ struct ieee80211_tx_info *info;
bool txok, flush;
txok = !(ts->ts_status & ATH9K_TXERR_MASK);
txq->axq_ampdu_depth--;
if (!bf_isampdu(bf)) {
- if (!flush)
+ if (!flush) {
+ info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ memcpy(info->control.rates, bf->rates,
+ sizeof(info->control.rates));
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
+ }
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
skb = bf->bf_mpdu;
tx_info = IEEE80211_SKB_CB(skb);
- rates = tx_info->control.rates;
+ rates = bf->rates;
/*
* Find the lowest frame length among the rate series that will have a
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
- if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
- return -EAGAIN;
-
/* update ampdu factor/density, they may have changed. This may happen
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
an->mpdudensity = density;
}
- txtid->state |= AGGR_ADDBA_PROGRESS;
+ txtid->active = true;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
txtid->bar_index = -1;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = txtid->ac->txq;
- if (txtid->state & AGGR_CLEANUP)
- return;
-
- if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
- txtid->state &= ~AGGR_ADDBA_PROGRESS;
- return;
- }
-
ath_txq_lock(sc, txq);
+ txtid->active = false;
txtid->paused = true;
-
- /*
- * If frames are still being transmitted for this TID, they will be
- * cleaned up during tx completion. To prevent race conditions, this
- * TID can only be reused after all in-progress subframes have been
- * completed.
- */
- if (txtid->baw_head != txtid->baw_tail)
- txtid->state |= AGGR_CLEANUP;
- else
- txtid->state &= ~AGGR_ADDBA_COMPLETE;
-
ath_tx_flush_tid(sc, txtid);
ath_txq_unlock_complete(sc, txq);
}
}
}
-void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
+void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tidno)
{
- struct ath_atx_tid *txtid;
+ struct ath_atx_tid *tid;
struct ath_node *an;
+ struct ath_txq *txq;
an = (struct ath_node *)sta->drv_priv;
+ tid = ATH_AN_2_TID(an, tidno);
+ txq = tid->ac->txq;
- txtid = ATH_AN_2_TID(an, tid);
- txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
- txtid->state |= AGGR_ADDBA_COMPLETE;
- txtid->state &= ~AGGR_ADDBA_PROGRESS;
- ath_tx_resume_tid(sc, txtid);
+ ath_txq_lock(sc, txq);
+
+ tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
+ tid->paused = false;
+
+ if (!skb_queue_empty(&tid->buf_q)) {
+ ath_tx_queue_tid(txq, tid);
+ ath_txq_schedule(sc, txq);
+ }
+
+ ath_txq_unlock_complete(sc, txq);
}
/********************/
tid->baw_head = tid->baw_tail = 0;
tid->sched = false;
tid->paused = false;
- tid->state &= ~AGGR_CLEANUP;
+ tid->active = false;
__skb_queue_head_init(&tid->buf_q);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_ADDBA_PROGRESS;
}
for (acno = 0, ac = &an->ac[acno];
}
ath_tid_drain(sc, txq, tid);
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
+ tid->active = false;
ath_txq_unlock(sc, txq);
}
netif_carrier_off(dev);
- if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv));
+ if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv))
printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
+{
+ int current_slot, previous_slot;
+
+ B43_WARN_ON(ring->tx);
+
+ /* The device has filled all buffers, so drop all packets and let TCP
+ * decrease speed.
+ * Decrementing the RX index by one lets the device see all slots
+ * as free again.
+ */
+ /* TODO: How to increase rx_drop in mac80211? */
+ current_slot = ring->ops->get_current_rxslot(ring);
+ previous_slot = prev_slot(ring, current_slot);
+ ring->ops->set_current_rxslot(ring, previous_slot);
+}
+
void b43_dma_rx(struct b43_dmaring *ring)
{
const struct b43_dma_ops *ops = ring->ops;
/* DMA-Interrupt reasons. */
#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
| (1 << 14) | (1 << 15))
-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
+#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
#define B43_DMAIRQ_RX_DONE (1 << 16)
/*** 32-bit DMA Engine. ***/
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
+
void b43_dma_rx(struct b43_dmaring *ring);
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
}
}
- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
- B43_DMAIRQ_NONFATALMASK))) {
- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
- b43err(dev->wl, "Fatal DMA error: "
- "0x%08X, 0x%08X, 0x%08X, "
- "0x%08X, 0x%08X, 0x%08X\n",
- dma_reason[0], dma_reason[1],
- dma_reason[2], dma_reason[3],
- dma_reason[4], dma_reason[5]);
- b43err(dev->wl, "This device does not support DMA "
+ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
+ b43err(dev->wl,
+ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+ dma_reason[0], dma_reason[1],
+ dma_reason[2], dma_reason[3],
+ dma_reason[4], dma_reason[5]);
+ b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
- /* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = true;
- b43_controller_restart(dev, "DMA error");
- return;
- }
- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
- b43err(dev->wl, "DMA error: "
- "0x%08X, 0x%08X, 0x%08X, "
- "0x%08X, 0x%08X, 0x%08X\n",
- dma_reason[0], dma_reason[1],
- dma_reason[2], dma_reason[3],
- dma_reason[4], dma_reason[5]);
- }
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = true;
+ b43_controller_restart(dev, "DMA error");
+ return;
}
if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
handle_irq_noise(dev);
/* Check the DMA reason registers for received data. */
+ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
+ if (B43_DEBUG)
+ b43warn(dev->wl, "RX descriptor underrun\n");
+ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
+ }
if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
if (b43_using_pio_transfers(dev))
b43_pio_rx(dev->pio.rx_queue);
return IRQ_NONE;
dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
- & 0x0001DC00;
+ & 0x0001FC00;
dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
& 0x0000DC00;
dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
b43_write32(dev, 0x018C, 0x02000000);
}
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
+ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
#include "tracepoint.h"
#define PKTFILTER_BUF_SIZE 128
-#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
#define BRCMF_DEFAULT_BCN_TIMEOUT 3
#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
goto done;
}
- /* Try to set and enable ARP offload feature, this may fail */
- err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
- if (err) {
- brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
- BRCMF_ARPOL_MODE, err);
- err = 0;
- } else {
- err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
- if (err) {
- brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
- err);
- err = 0;
- } else
- brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
- BRCMF_ARPOL_MODE);
- }
-
/* Setup packet filter */
brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+ ndev->destructor = free_netdev;
return 0;
fail:
+ drvr->iflist[ifp->bssidx] = NULL;
ndev->netdev_ops = NULL;
+ free_netdev(ndev);
return -EBADE;
}
return 0;
fail:
+ ifp->drvr->iflist[ifp->bssidx] = NULL;
+ ndev->netdev_ops = NULL;
+ free_netdev(ndev);
return -EBADE;
}
struct brcmf_if *ifp;
ifp = drvr->iflist[bssidx];
+ drvr->iflist[bssidx] = NULL;
if (!ifp) {
brcmf_err("Null interface, idx=%d\n", bssidx);
return;
cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work);
}
-
+ /* unregister will take care of freeing it */
unregister_netdev(ifp->ndev);
if (bssidx == 0)
brcmf_cfg80211_detach(drvr->config);
- free_netdev(ifp->ndev);
} else {
kfree(ifp);
}
- drvr->iflist[bssidx] = NULL;
}
int brcmf_attach(uint bus_hdrlen, struct device *dev)
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- free_netdev(ifp->ndev);
- drvr->iflist[0] = NULL;
if (p2p_ifp) {
free_netdev(p2p_ifp->ndev);
drvr->iflist[1] = NULL;
return ret;
}
if ((brcmf_p2p_enable) && (p2p_ifp))
- brcmf_net_p2p_attach(p2p_ifp);
+ if (brcmf_net_p2p_attach(p2p_ifp) < 0)
+ brcmf_p2p_enable = 0;
return 0;
}
return;
brcmf_fws_add_interface(ifp);
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
- err = brcmf_net_attach(ifp, false);
+ if (brcmf_net_attach(ifp, false) < 0)
+ return;
}
if (ifevent->action == BRCMF_E_IF_CHANGE)
#define BRCMF_FIL_ACTION_FRAME_SIZE 1800
+/* ARP Offload feature flags for arp_ol iovar */
+#define BRCMF_ARP_OL_AGENT 0x00000001
+#define BRCMF_ARP_OL_SNOOP 0x00000002
+#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
+
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
(channel == SOCIAL_CHAN_2) || \
(channel == SOCIAL_CHAN_3))
+#define BRCMF_P2P_TEMP_CHAN SOCIAL_CHAN_3
#define SOCIAL_CHAN_CNT 3
#define AF_PEER_SEARCH_CNT 2
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) {
brcmf_err("set p2p_disc error\n");
- brcmf_free_vif(p2p_vif);
+ brcmf_free_vif(cfg, p2p_vif);
goto exit;
}
/* obtain bsscfg index for P2P discovery */
err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
if (err < 0) {
brcmf_err("retrieving discover bsscfg index failed\n");
- brcmf_free_vif(p2p_vif);
+ brcmf_free_vif(cfg, p2p_vif);
goto exit;
}
/* Verify that firmware uses same bssidx as driver !! */
if (p2p_ifp->bssidx != bssidx) {
brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
bssidx, p2p_ifp->bssidx);
- brcmf_free_vif(p2p_vif);
+ brcmf_free_vif(cfg, p2p_vif);
goto exit;
}
brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p);
/* remove discovery interface */
- brcmf_free_vif(vif);
+ brcmf_free_vif(p2p->cfg, vif);
p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
}
/* just set it all to zero */
u16 *chanspec)
{
struct brcmf_if *ifp;
- struct brcmf_fil_chan_info_le ci;
+ u8 mac_addr[ETH_ALEN];
struct brcmu_chan ch;
- s32 err;
+ struct brcmf_bss_info_le *bi;
+ u8 *buf;
ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
- ch.chnum = 11;
-
- err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
- if (!err)
- ch.chnum = le32_to_cpu(ci.hw_channel);
+ if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mac_addr,
+ ETH_ALEN) == 0) {
+ buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (buf != NULL) {
+ *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
+ if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+ buf, WL_BSS_INFO_MAX) == 0) {
+ bi = (struct brcmf_bss_info_le *)(buf + 4);
+ *chanspec = le16_to_cpu(bi->chanspec);
+ kfree(buf);
+ return;
+ }
+ kfree(buf);
+ }
+ }
+ /* Use default channel for P2P */
+ ch.chnum = BRCMF_P2P_TEMP_CHAN;
ch.bw = BRCMU_CHAN_BW_20;
p2p->cfg->d11inf.encchspec(&ch);
*chanspec = ch.chspec;
return &p2p_vif->wdev;
fail:
- brcmf_free_vif(p2p_vif);
+ brcmf_free_vif(p2p->cfg, p2p_vif);
return ERR_PTR(err);
}
*
* @vif: virtual interface object to delete.
*/
-static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_vif *vif)
+static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif)
{
- struct brcmf_p2p_info *p2p = &vif->ifp->drvr->config->p2p;
-
cfg80211_unregister_wdev(&vif->wdev);
- p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- brcmf_free_vif(vif);
+ cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ brcmf_free_vif(cfg, vif);
+}
+
+/**
+ * brcmf_p2p_free_p2p_if() - free up net device related data.
+ *
+ * @ndev: net device that needs to be freed.
+ */
+static void brcmf_p2p_free_p2p_if(struct net_device *ndev)
+{
+ struct brcmf_cfg80211_info *cfg;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
+
+ ifp = netdev_priv(ndev);
+ cfg = ifp->drvr->config;
+ vif = ifp->vif;
+
+ brcmf_free_vif(cfg, vif);
+ free_netdev(ifp->ndev);
}
/**
brcmf_err("Registering netdevice failed\n");
goto fail;
}
+ /* override destructor */
+ ifp->ndev->destructor = brcmf_p2p_free_p2p_if;
+
cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
/* Disable firmware roaming for P2P interface */
brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
return &ifp->vif->wdev;
fail:
- brcmf_free_vif(vif);
+ brcmf_free_vif(cfg, vif);
return ERR_PTR(err);
}
break;
case NL80211_IFTYPE_P2P_DEVICE:
- brcmf_p2p_delete_p2pdev(vif);
+ brcmf_p2p_delete_p2pdev(cfg, vif);
return 0;
default:
return -ENOTSUPP;
err = 0;
}
brcmf_cfg80211_arm_vif_event(cfg, NULL);
- brcmf_free_vif(vif);
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
return err;
return err;
}
+static s32
+brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
+{
+ s32 err;
+ u32 mode;
+
+ if (enable)
+ mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
+ else
+ mode = 0;
+
+ /* Try to set and enable the ARP offload feature; this may fail, in
+ * which case it is simply not supported and 0 is returned.
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+ mode, err);
+ err = 0;
+ } else {
+ err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
+ enable, err);
+ err = 0;
+ } else
+ brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
+ enable, mode);
+ }
+
+ return err;
+}
+
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
enum nl80211_iftype type,
}
pm = enabled ? PM_FAST : PM_OFF;
+ /* Do not enable the power save after assoc if it is a p2p interface */
+ if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ brcmf_dbg(INFO, "Do not enable power save for P2P clients\n");
+ pm = PM_OFF;
+ }
brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled"));
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
return err;
}
+static s32
+brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp,
+ struct ieee80211_channel *channel)
+{
+ u16 chanspec;
+ s32 err;
+
+ brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
+ channel->center_freq);
+
+ chanspec = channel_to_chanspec(&cfg->d11inf, channel);
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+
+ return err;
+}
+
static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
s32 ie_offset;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
}
brcmf_set_mpc(ifp, 0);
+ brcmf_configure_arp_offload(ifp, false);
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+ err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
+ if (err < 0) {
+ brcmf_err("Set Channel failed, %d\n", err);
+ goto exit;
+ }
+
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
settings->beacon_interval);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
- if (err)
+ if (err) {
brcmf_set_mpc(ifp, 1);
+ brcmf_configure_arp_offload(ifp, true);
+ }
return err;
}
brcmf_err("bss_enable config failed %d\n", err);
}
brcmf_set_mpc(ifp, 1);
+ brcmf_configure_arp_offload(ifp, true);
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO)
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ }
};
static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
{
.max_interfaces = BRCMF_IFACE_MAX_CNT,
- .num_different_channels = 1, /* no multi-channel for now */
+ .num_different_channels = 2,
.n_limits = ARRAY_SIZE(brcmf_iface_limits),
.limits = brcmf_iface_limits
}
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
wiphy->iface_combinations = brcmf_iface_combos;
wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
return vif;
}
-void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif)
{
- struct brcmf_cfg80211_info *cfg;
- struct wiphy *wiphy;
-
- wiphy = vif->wdev.wiphy;
- cfg = wiphy_priv(wiphy);
list_del(&vif->list);
cfg->vif_cnt--;
kfree(vif);
if (!cfg->vif_cnt) {
- wiphy_unregister(wiphy);
- wiphy_free(wiphy);
+ wiphy_unregister(cfg->wiphy);
+ wiphy_free(cfg->wiphy);
}
}
return 0;
case BRCMF_E_IF_DEL:
- ifp->vif = NULL;
mutex_unlock(&event->vif_event_lock);
/* event may not be upon user request */
if (brcmf_cfg80211_vif_event_armed(cfg))
wl_deinit_priv(cfg);
cfg80211_attach_out:
- brcmf_free_vif(vif);
- wiphy_free(wiphy);
+ brcmf_free_vif(cfg, vif);
return NULL;
}
wl_deinit_priv(cfg);
brcmf_btcoex_detach(cfg);
list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
- brcmf_free_vif(vif);
+ brcmf_free_vif(cfg, vif);
}
}
if (err)
goto default_conf_out;
+ brcmf_configure_arp_offload(ifp, true);
+
cfg->dongle_up = true;
default_conf_out:
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype type,
bool pm_block);
-void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
-inline u16
+u16
il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
u8 n_probes)
{
memcpy(&lq, priv->stations[i].lq,
sizeof(struct iwl_link_quality_cmd));
- if (!memcmp(&lq, &zero_lq, sizeof(lq)))
+ if (memcmp(&lq, &zero_lq, sizeof(lq)))
send_lq = true;
}
spin_unlock_bh(&priv->sta_lock);
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ MCAST_FILTER_CMD = 0xd0,
+
/* D3 commands/notifications */
D3_CONFIG_CMD = 0xd3,
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
u8 data[0];
} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
+#define MAX_PORT_ID_NUM 2
+
+/**
+ * struct iwl_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ * of identifying the network interface, adopted in the host-device
+ * interface. It is used by the FW as an index into an array of
+ * addresses. This array has MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Placeholder for the array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+ u8 filter_own;
+ u8 port_id;
+ u8 count;
+ u8 pass_all;
+ u8 bssid[6];
+ u8 reserved[2];
+ u8 addr_list[0];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
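The MCAST_FILTER_CMD payload added above is variable-length: addr_list carries @count MAC addresses and, per the IMPORTANT note, the overall command must stay DWORD-aligned. As a minimal, hypothetical sketch (not part of this series), a host-side helper could size and fill the command like this, assuming the usual kernel helpers (kzalloc, roundup, ETH_ALEN) and an invented name build_mcast_filter_cmd:

/* Hypothetical sketch only -- not code from this patch series. */
static struct iwl_mcast_filter_cmd *
build_mcast_filter_cmd(const u8 (*addrs)[ETH_ALEN], u8 count, const u8 *bssid)
{
	/* Pad the trailing address list so the total length stays DWORD aligned. */
	size_t len = roundup(sizeof(struct iwl_mcast_filter_cmd) +
			     count * ETH_ALEN, 4);
	struct iwl_mcast_filter_cmd *cmd = kzalloc(len, GFP_KERNEL);
	int i;

	if (!cmd)
		return NULL;

	cmd->pass_all = 0;		/* only pass the listed addresses */
	cmd->count = count;
	memcpy(cmd->bssid, bssid, ETH_ALEN);
	for (i = 0; i < count; i++)
		memcpy(&cmd->addr_list[i * ETH_ALEN], addrs[i], ETH_ALEN);

	return cmd;	/* caller sends it (e.g. via iwl_mvm_send_cmd_pdu) and frees it */
}

The in-tree user added by this patch, iwl_mvm_configure_mcast_filter() below, takes the simpler path of pass_all = 1 with an empty address list.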
*/
static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_data_sta *ctxt_sta)
+ struct iwl_mac_data_sta *ctxt_sta,
+ bool force_assoc_off)
{
/* We need the dtim_period to set the MAC as associated */
- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+ !force_assoc_off) {
u32 dtim_offs;
/*
cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
/* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
+ action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
/* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
+ action == FW_CTXT_ACTION_ADD);
cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
*total_flags = 0;
}
+static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mcast_filter_cmd mcast_filter_cmd = {
+ .pass_all = 1,
+ };
+
+ memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(mcast_filter_cmd),
+ &mcast_filter_cmd);
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
return;
}
iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+ iwl_mvm_configure_mcast_filter(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
switch (cmd) {
case STA_NOTIFY_SLEEP:
- if (atomic_read(&mvmsta->pending_frames) > 0)
+ if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+ atomic_t pending_frames[IWL_MVM_STATION_COUNT];
/* configured by mac80211 */
u32 rts_threshold;
CMD(BT_COEX_PROT_ENV),
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
+ CMD(MCAST_FILTER_CMD),
};
#undef CMD
else
cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+ /*
+ * TODO: This is a WA due to a bug in the FW AUX framework that does not
+ * properly handle time events that fail to be scheduled
+ */
+ cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+
cmd->repeats = cpu_to_le32(1);
/*
mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
/* HW restart, don't assume the memory has been zeroed */
- atomic_set(&mvm_sta->pending_frames, 0);
+ atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0;
mvm_sta->tfd_queue_msk = 0;
for (i = 0; i < IEEE80211_NUM_ACS; i++)
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
}
+ /*
+ * Make sure that the tx response code sees the station as -EBUSY and
+ * calls the drain worker.
+ */
+ spin_lock_bh(&mvm_sta->lock);
/*
* There are frames pending on the AC queues for this station.
* We need to wait until all the frames are drained...
*/
- if (atomic_read(&mvm_sta->pending_frames)) {
- ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EBUSY));
+ spin_unlock_bh(&mvm_sta->lock);
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else {
+ spin_unlock_bh(&mvm_sta->lock);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
- * @pending_frames: number of frames for this STA on the shared Tx queues.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
u8 max_agg_bufsize;
bool bt_reduced_txpower;
spinlock_t lock;
- atomic_t pending_frames;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
spin_unlock(&mvmsta->lock);
- if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
- txq_id < IWL_MVM_FIRST_AGG_QUEUE)
- atomic_inc(&mvmsta->pending_frames);
+ if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+ atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
/*
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
- * If there are no pending frames for this STA, notify mac80211 that
- * this station can go to sleep in its STA table.
*/
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta &&
- !WARN_ON(skb_freed > 1) &&
- mvmsta->vif->type == NL80211_IFTYPE_AP &&
- atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
+ if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+ atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
+ if (mvmsta) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ */
+ if (mvmsta->vif->type == NL80211_IFTYPE_AP)
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ /*
+ * We might very well have taken mvmsta pointer while
+ * the station was being removed. The remove flow might
+ * have seen a pending_frame (because we didn't take
+ * the lock) even if now the queues are drained. So make
+ * really sure now that the station is not being
+ * removed. If it is, run the drain worker to remove it.
+ */
+ spin_lock_bh(&mvmsta->lock);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ /*
+ * Station disappeared in the meantime:
+ * so we are draining.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+ } else if (!mvmsta) {
+ /* Tx response without STA, so we are draining */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
}
rcu_read_unlock();
class_destroy(hwsim_class);
}
-
-static struct device_driver mac80211_hwsim_driver = {
- .name = "mac80211_hwsim",
- .bus = &platform_bus_type,
- .owner = THIS_MODULE,
+static struct platform_driver mac80211_hwsim_driver = {
+ .driver = {
+ .name = "mac80211_hwsim",
+ .owner = THIS_MODULE,
+ },
};
static const struct net_device_ops hwsim_netdev_ops = {
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
- err = driver_register(&mac80211_hwsim_driver);
+ err = platform_driver_register(&mac80211_hwsim_driver);
if (err)
return err;
err = -ENOMEM;
goto failed_drvdata;
}
- data->dev->driver = &mac80211_hwsim_driver;
+ data->dev->driver = &mac80211_hwsim_driver.driver;
err = device_bind_driver(data->dev);
if (err != 0) {
printk(KERN_DEBUG
failed:
mac80211_hwsim_free();
failed_unregister_driver:
- driver_unregister(&mac80211_hwsim_driver);
+ platform_driver_unregister(&mac80211_hwsim_driver);
return err;
}
module_init(init_mac80211_hwsim);
mac80211_hwsim_free();
unregister_netdev(hwsim_mon);
- driver_unregister(&mac80211_hwsim_driver);
+ platform_driver_unregister(&mac80211_hwsim_driver);
}
module_exit(exit_mac80211_hwsim);
if (wdev->netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(wdev->netdev);
- if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
- free_netdev(wdev->netdev);
-
/* Clear the priv in adapter */
priv->netdev = NULL;
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
adapter->is_hs_configured = false;
+ adapter->is_suspended = false;
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
struct net_device *dev)
{
dev->netdev_ops = &mwifiex_netdev_ops;
+ dev->destructor = free_netdev;
/* Initialize private structure */
priv->current_key_index = 0;
priv->media_connected = false;
} else {
/* Multicast */
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
+ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
dev_dbg(priv->adapter->dev,
"info: Enabling All Multicast!\n");
priv->curr_pkt_filter |=
dev_dbg(priv->adapter->dev,
"info: Set multicast list=%d\n",
mcast_list->num_multicast_addr);
- /* Set multicast addresses to firmware */
- if (old_pkt_filter == priv->curr_pkt_filter) {
- /* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
- } else {
- /* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
- }
+ /* Send multicast addresses to firmware */
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_MAC_MULTICAST_ADR,
+ HostCmd_ACT_GEN_SET, 0,
+ mcast_list);
}
}
}
rxmcs == DESC92C_RATE11M)
struct phy_rx_agc_info_t {
- #if __LITTLE_ENDIAN
+ #ifdef __LITTLE_ENDIAN
u8 gain:7, trsw:1;
#else
u8 trsw:1, gain:7;
u8 stream_target_csi[2];
u8 sig_evm;
u8 rsvd_3;
-#if __LITTLE_ENDIAN
+#ifdef __LITTLE_ENDIAN
u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
u8 sgi_en:1;
u8 rxsc:2;
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0846, 0xf001, rtl92cu_hal_cfg)}, /*On Netwrks N300MA*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
config NFC_MEI_PHY
tristate "MEI bus NFC device support"
- depends on INTEL_MEI_BUS_NFC && NFC_HCI
+ depends on INTEL_MEI && NFC_HCI
help
This adds support to use an mei bus nfc device. Select this if you
will use an HCI NFC driver for an NFC chip connected behind an
return r;
}
+ r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
+ if (r) {
+ pr_err("MEY_PHY: Event cb registration failed\n");
+ mei_cl_disable_device(phy->device);
+ phy->powered = 0;
+
+ return r;
+ }
+
phy->powered = 1;
return 0;
return -ENOMEM;
}
- r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
- if (r) {
- pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
- goto err_out;
- }
-
r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
&phy->hdev);
- if (r < 0)
- goto err_out;
-
- return 0;
+ if (r < 0) {
+ nfc_mei_phy_free(phy);
-err_out:
- nfc_mei_phy_free(phy);
+ return r;
+ }
- return r;
+ return 0;
}
static int microread_mei_remove(struct mei_cl_device *device)
microread_remove(phy->hdev);
- nfc_mei_phy_disable(phy);
-
nfc_mei_phy_free(phy);
return 0;
return -ENOMEM;
}
- r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
- if (r) {
- pr_err(PN544_DRIVER_NAME ": event cb registration failed\n");
- goto err_out;
- }
-
r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
&phy->hdev);
- if (r < 0)
- goto err_out;
-
- return 0;
+ if (r < 0) {
+ nfc_mei_phy_free(phy);
-err_out:
- nfc_mei_phy_free(phy);
+ return r;
+ }
- return r;
+ return 0;
}
static int pn544_mei_remove(struct mei_cl_device *device)
pn544_hci_remove(phy->hdev);
- nfc_mei_phy_disable(phy);
-
nfc_mei_phy_free(phy);
return 0;
*/
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return NULL;
return ndev->mw[mw].vbase;
*/
resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return 0;
return ndev->mw[mw].bar_sz;
*/
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return;
dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
ndev->mw[i].vbase =
ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
ndev->mw[i].bar_sz);
- dev_info(&pdev->dev, "MW %d size %d\n", i,
- (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+ dev_info(&pdev->dev, "MW %d size %llu\n", i,
+ (unsigned long long)pci_resource_len(pdev, MW_TO_BAR(i)));
if (!ndev->mw[i].vbase) {
dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
MW_TO_BAR(i));
#include <linux/ntb.h>
#include "ntb_hw.h"
-#define NTB_TRANSPORT_VERSION 2
+#define NTB_TRANSPORT_VERSION 3
static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
enum {
VERSION = 0,
- MW0_SZ,
- MW1_SZ,
- NUM_QPS,
QP_LINKS,
+ NUM_QPS,
+ NUM_MWS,
+ MW0_SZ_HIGH,
+ MW0_SZ_LOW,
+ MW1_SZ_HIGH,
+ MW1_SZ_LOW,
MAX_SPAD,
};
{
struct ntb_transport_client_dev *client_dev;
struct ntb_transport *nt;
- int rc;
+ int rc, i = 0;
if (list_empty(&ntb_transport_list))
return -ENODEV;
dev = &client_dev->dev;
/* setup and register client devices */
- dev_set_name(dev, "%s", device_name);
+ dev_set_name(dev, "%s%d", device_name, i);
dev->bus = &ntb_bus_type;
dev->release = ntb_client_release;
dev->parent = &ntb_query_pdev(nt->ndev)->dev;
}
list_add_tail(&client_dev->entry, &nt->client_devs);
+ i++;
}
return 0;
(qp_num / NTB_NUM_MW * rx_size);
rx_size -= sizeof(struct ntb_rx_info);
- qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
- qp->rx_max_frame = min(transport_mtu, rx_size);
+ qp->rx_buff = qp->remote_rx_info + 1;
+ /* Due to housekeeping, there must be at least 2 buffs */
+ qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
- qp->remote_rx_info->entry = qp->rx_max_entry;
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;
/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
qp->rx_pkts = 0;
qp->tx_pkts = 0;
+ qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+ struct ntb_transport_mw *mw = &nt->mw[num_mw];
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ if (!mw->virt_addr)
+ return;
+
+ dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+ mw->virt_addr = NULL;
}
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
struct ntb_transport_mw *mw = &nt->mw[num_mw];
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+ /* No need to re-setup */
+ if (mw->size == ALIGN(size, 4096))
+ return 0;
+
+ if (mw->size != 0)
+ ntb_free_mw(nt, num_mw);
+
/* Alloc memory for receiving data. Must be 4k aligned */
mw->size = ALIGN(size, 4096);
mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
GFP_KERNEL);
if (!mw->virt_addr) {
+ mw->size = 0;
dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
(int) mw->size);
return -ENOMEM;
u32 val;
int rc, i;
- /* send the local info */
- rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- 0, VERSION);
- goto out;
- }
+ /* send the local info, in the opposite order of the way we read it */
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+ ntb_get_mw_size(ndev, i) >> 32);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32)(ntb_get_mw_size(ndev, i) >> 32),
+ MW0_SZ_HIGH + (i * 2));
+ goto out;
+ }
- rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
- goto out;
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+ (u32) ntb_get_mw_size(ndev, i));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, i),
+ MW0_SZ_LOW + (i * 2));
+ goto out;
+ }
}
- rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+ rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+ NTB_NUM_MW, NUM_MWS);
goto out;
}
goto out;
}
- rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
- goto out;
- }
-
- rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+ rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- val, QP_LINKS);
+ NTB_TRANSPORT_VERSION, VERSION);
goto out;
}
goto out;
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
- rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+ rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
goto out;
}
- if (!val)
+ if (val != NTB_NUM_MW)
goto out;
- dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+ dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
- rc = ntb_set_mw(nt, 0, val);
- if (rc)
- goto out;
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ u64 val64;
- rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
- goto out;
- }
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_HIGH + (i * 2));
+ goto out1;
+ }
- if (!val)
- goto out;
- dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+ val64 = (u64) val << 32;
- rc = ntb_set_mw(nt, 1, val);
- if (rc)
- goto out;
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_LOW + (i * 2));
+ goto out1;
+ }
+
+ val64 |= val;
+
+ dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+ rc = ntb_set_mw(nt, i, val64);
+ if (rc)
+ goto out1;
+ }
nt->transport_link = NTB_LINK_UP;
return;
+out1:
+ for (i = 0; i < NTB_NUM_MW; i++)
+ ntb_free_mw(nt, i);
out:
if (ntb_hw_link_status(ndev))
schedule_delayed_work(&nt->link_work,
(qp_num / NTB_NUM_MW * tx_size);
tx_size -= sizeof(struct ntb_rx_info);
- qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
- qp->tx_max_frame = min(transport_mtu, tx_size);
+ qp->tx_mw = qp->rx_info + 1;
+ /* Due to housekeeping, there must be at least 2 buffs */
+ qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- qp->tx_index = 0;
if (nt->debugfs_dir) {
char debugfs_name[4];
pdev = ntb_query_pdev(nt->ndev);
for (i = 0; i < NTB_NUM_MW; i++)
- if (nt->mw[i].virt_addr)
- dma_free_coherent(&pdev->dev, nt->mw[i].size,
- nt->mw[i].virt_addr,
- nt->mw[i].dma_addr);
+ ntb_free_mw(nt, i);
kfree(nt->qps);
ntb_unregister_transport(nt->ndev);
static void ntb_transport_rx(unsigned long data)
{
struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
- int rc;
+ int rc, i;
- do {
+ /* Limit the number of packets processed in a single interrupt to
+ * provide fairness to others
+ */
+ for (i = 0; i < qp->rx_max_entry; i++) {
rc = ntb_process_rxc(qp);
- } while (!rc);
+ if (rc)
+ break;
+ }
}
static void ntb_transport_rxc_db(void *data, int db_num)
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
struct ntb_queue_entry *entry;
if (!qp)
return;
+ pdev = ntb_query_pdev(qp->ndev);
+
cancel_delayed_work_sync(&qp->link_work);
ntb_unregister_db_callback(qp->ndev, qp->qp_num);
*/
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
int rc, val;
if (!qp)
return;
+ pdev = ntb_query_pdev(qp->ndev);
qp->client_ready = NTB_LINK_DOWN;
rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
*/
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return false;
+
return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
*/
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
*/
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
out_args->args_count = count;
for (i = 0; i < count; i++)
out_args->args[i] = be32_to_cpup(list++);
+ } else {
+ of_node_put(node);
}
/* Found it! return success */
- if (node)
- of_node_put(node);
return 0;
}
BUG();
}
- if (ldev->hba.elmmio_space.start) {
+ if (ldev->hba.elmmio_space.flags) {
err = request_resource(&iomem_resource,
&(ldev->hba.elmmio_space));
if (err < 0) {
case PAT_LMMIO:
/* used to fix up pre-initialized MEM BARs */
- if (!lba_dev->hba.lmmio_space.start) {
+ if (!lba_dev->hba.lmmio_space.flags) {
sprintf(lba_dev->hba.lmmio_name,
"PCI%02x LMMIO",
(int)lba_dev->hba.bus_num.start);
io->start;
r = &lba_dev->hba.lmmio_space;
r->name = lba_dev->hba.lmmio_name;
- } else if (!lba_dev->hba.elmmio_space.start) {
+ } else if (!lba_dev->hba.elmmio_space.flags) {
sprintf(lba_dev->hba.elmmio_name,
"PCI%02x ELMMIO",
(int)lba_dev->hba.bus_num.start);
r->name = "LBA PCI Busses";
r->start = lba_num & 0xff;
r->end = (lba_num>>8) & 0xff;
+ r->flags = IORESOURCE_BUS;
/* Set up local PCI Bus resources - we don't need them for
** Legacy boxes but it's nice to see in /proc/iomem.
pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
HBA_PORT_BASE(lba_dev->hba.hba_num));
- if (lba_dev->hba.elmmio_space.start)
+ if (lba_dev->hba.elmmio_space.flags)
pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
lba_dev->hba.lmmio_space_offset);
if (lba_dev->hba.lmmio_space.flags)
.probe = superio_probe,
};
-static int __init superio_modinit(void)
-{
- return pci_register_driver(&superio_driver);
-}
-
-static void __exit superio_exit(void)
-{
- pci_unregister_driver(&superio_driver);
-}
-
-module_init(superio_modinit);
-module_exit(superio_exit);
+module_pci_driver(superio_driver);
config PARPORT_PC_SUPERIO
bool "SuperIO chipset support"
- depends on PARPORT_PC
+ depends on PARPORT_PC && !PARISC
help
Saying Y here enables some probes for Super-IO chipsets in order to
find out things like base addresses, IRQ lines and DMA channels. It
struct parport *parport_gsc_probe_port(unsigned long base,
unsigned long base_hi, int irq,
- int dma, struct pci_dev *dev)
+ int dma, struct parisc_device *padev)
{
struct parport_gsc_private *priv;
struct parport_operations *ops;
priv->ctr_writable = 0xff;
priv->dma_buf = 0;
priv->dma_handle = 0;
- priv->dev = dev;
p->base = base;
p->base_hi = base_hi;
p->irq = irq;
return NULL;
}
+ p->dev = &padev->dev;
p->base_hi = base_hi;
p->modes = tmp.modes;
p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
}
p = parport_gsc_probe_port(port, 0, dev->irq,
- /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
+ /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev);
if (p)
parport_count++;
dev_set_drvdata(&dev->dev, p);
extern struct parport *parport_gsc_probe_port(unsigned long base,
unsigned long base_hi,
int irq, int dma,
- struct pci_dev *dev);
+ struct parisc_device *padev);
#endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */
return AE_OK ;
}
+void acpiphp_check_host_bridge(acpi_handle handle)
+{
+ struct acpiphp_bridge *bridge;
+
+ bridge = acpiphp_handle_to_bridge(handle);
+ if (bridge) {
+ acpiphp_check_bridge(bridge);
+ put_bridge(bridge);
+ }
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+}
+
static void _handle_hotplug_event_bridge(struct work_struct *work)
{
struct acpiphp_bridge *bridge;
u8 devfn;
u16 domain;
int severity;
+ struct aer_capability_regs *regs;
};
static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
- int severity)
+ int severity, struct aer_capability_regs *aer_regs)
{
unsigned long flags;
struct aer_recover_entry entry = {
.devfn = devfn,
.domain = domain,
.severity = severity,
+ .regs = aer_regs,
};
spin_lock_irqsave(&aer_recover_ring_lock, flags);
PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
continue;
}
+ cper_print_aer(pdev, entry.severity, entry.regs);
do_recovery(pdev, entry.severity);
pci_dev_put(pdev);
}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
-void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
struct aer_capability_regs *aer)
{
int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
agent = AER_GET_AGENT(aer_severity, status);
dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
status, mask);
- cper_print_bits(prefix, status, status_strs, status_strs_size);
+ cper_print_bits("", status, status_strs, status_strs_size);
dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
bool found = false;
unsigned long config;
- mutex_lock(&pctldev->mutex);
+ mutex_lock(&pinctrl_maps_mutex);
/* Parse the pinctrl map and look for the elected pin/state */
for_each_maps(maps_node, i, map) {
confops->pin_config_config_dbg_show(pctldev, s, config);
exit:
- mutex_unlock(&pctldev->mutex);
+ mutex_unlock(&pinctrl_maps_mutex);
return 0;
}
if (abx500_pdata)
pdata = abx500_pdata->gpio;
- if (!pdata) {
- if (np) {
- const struct of_device_id *match;
- match = of_match_device(abx500_gpio_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- id = (unsigned long)match->data;
- } else {
- dev_err(&pdev->dev, "gpio dt and platform data missing\n");
- return -ENODEV;
- }
+ if (!(pdata || np)) {
+ dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+ return -ENODEV;
}
- if (platid)
- id = platid->driver_data;
-
pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
GFP_KERNEL);
if (pct == NULL) {
pct->chip.dev = &pdev->dev;
pct->chip.base = (np) ? -1 : pdata->gpio_base;
+ if (platid)
+ id = platid->driver_data;
+ else if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_device(abx500_gpio_match, &pdev->dev);
+ if (match)
+ id = (unsigned long)match->data;
+ }
+
/* initialize the lock */
mutex_init(&pct->lock);
abx500_pinctrl_ab8505_init(&pct->soc);
break;
default:
- dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
- (int) platid->driver_data);
+ dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id);
mutex_destroy(&pct->lock);
return -EINVAL;
}
gpio->dev = &pdev->dev;
memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!memres) {
- dev_err(gpio->dev, "could not get GPIO memory resource\n");
- return -ENODEV;
- }
-
gpio->base = devm_ioremap_resource(&pdev->dev, memres);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
return 0;
err_no_range:
- err = gpiochip_remove(&gpio->chip);
+ if (gpiochip_remove(&gpio->chip))
+ dev_err(&pdev->dev, "failed to remove gpio chip\n");
err_no_chip:
err_no_domain:
err_no_port:
return IRQ_HANDLED;
}
+struct exynos_eint_gpio_save {
+ u32 eint_con;
+ u32 eint_fltcon0;
+ u32 eint_fltcon1;
+};
+
/*
* exynos_eint_gpio_init() - setup handling of external gpio interrupts.
* @d: driver data of samsung pinctrl driver.
{
struct samsung_pin_bank *bank;
struct device *dev = d->dev;
- unsigned int ret;
- unsigned int i;
+ int ret;
+ int i;
if (!d->irq) {
dev_err(dev, "irq number not available\n");
bank->nr_pins, &exynos_gpio_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "gpio irq domain add failed\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_domains;
+ }
+
+ bank->soc_priv = devm_kzalloc(d->dev,
+ sizeof(struct exynos_eint_gpio_save), GFP_KERNEL);
+ if (!bank->soc_priv) {
+ irq_domain_remove(bank->irq_domain);
+ ret = -ENOMEM;
+ goto err_domains;
}
}
return 0;
+
+err_domains:
+ for (--i, --bank; i >= 0; --i, --bank) {
+ if (bank->eint_type != EINT_TYPE_GPIO)
+ continue;
+ irq_domain_remove(bank->irq_domain);
+ }
+
+ return ret;
}
static void exynos_wkup_irq_unmask(struct irq_data *irqd)
return 0;
}
+static u32 exynos_eint_wake_mask = 0xffffffff;
+
+u32 exynos_get_eint_wake_mask(void)
+{
+ return exynos_eint_wake_mask;
+}
+
+static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
+
+ pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq);
+
+ if (!on)
+ exynos_eint_wake_mask |= bit;
+ else
+ exynos_eint_wake_mask &= ~bit;
+
+ return 0;
+}
+
/*
* irq_chip for wakeup interrupts
*/
.irq_mask = exynos_wkup_irq_mask,
.irq_ack = exynos_wkup_irq_ack,
.irq_set_type = exynos_wkup_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
};
/* interrupt handler for wakeup interrupts 0..15 */
return 0;
}
+static void exynos_pinctrl_suspend_bank(
+ struct samsung_pinctrl_drv_data *drvdata,
+ struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ void __iomem *regs = drvdata->virt_base;
+
+ save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+
+ pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
+ pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
+ pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+}
+
+static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+ struct samsung_pin_bank *bank = ctrl->pin_banks;
+ int i;
+
+ for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+ if (bank->eint_type == EINT_TYPE_GPIO)
+ exynos_pinctrl_suspend_bank(drvdata, bank);
+}
+
+static void exynos_pinctrl_resume_bank(
+ struct samsung_pinctrl_drv_data *drvdata,
+ struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ void __iomem *regs = drvdata->virt_base;
+
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset), save->eint_con);
+ pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset), save->eint_fltcon0);
+ pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4), save->eint_fltcon1);
+
+ writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+}
+
+static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+ struct samsung_pin_bank *bank = ctrl->pin_banks;
+ int i;
+
+ for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+ if (bank->eint_type == EINT_TYPE_GPIO)
+ exynos_pinctrl_resume_bank(drvdata, bank);
+}
+
/* pin banks of exynos4210 pin-controller 0 */
static struct samsung_pin_bank exynos4210_pin_banks0[] = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4210-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4210-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4x12-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4x12-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4x12-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos4x12-gpio-ctrl3",
},
};
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos5250-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos5250-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos5250-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
.svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.label = "exynos5250-gpio-ctrl3",
},
};
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
+#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
#define EXYNOS_GPIO_EMASK_OFFSET 0x900
#define EXYNOS_GPIO_EPEND_OFFSET 0xA00
#define EXYNOS_WKUP_ECON_OFFSET 0xE00
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
int i;
for (i = 0; i < num_maps; i++)
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
kfree(map[i].data.configs.configs);
kfree(map);
}
#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#include "core.h"
#include "pinctrl-samsung.h"
{ "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN },
};
+/* Global list of devices (struct samsung_pinctrl_drv_data) */
+LIST_HEAD(drvdata_list);
+
static unsigned int pin_base;
static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
drvdata->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(drvdata->virt_base))
return PTR_ERR(drvdata->virt_base);
ctrl->eint_wkup_init(drvdata);
platform_set_drvdata(pdev, drvdata);
+
+ /* Add to the global list */
+ list_add_tail(&drvdata->node, &drvdata_list);
+
return 0;
}
+#ifdef CONFIG_PM
+
+/**
+ * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device
+ *
+ * Save data for all banks handled by this device.
+ */
+static void samsung_pinctrl_suspend_dev(
+ struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+ void __iomem *virt_base = drvdata->virt_base;
+ int i;
+
+ for (i = 0; i < ctrl->nr_banks; i++) {
+ struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+ void __iomem *reg = virt_base + bank->pctl_offset;
+
+ u8 *offs = bank->type->reg_offset;
+ u8 *widths = bank->type->fld_width;
+ enum pincfg_type type;
+
+ /* Registers without a powerdown config aren't lost */
+ if (!widths[PINCFG_TYPE_CON_PDN])
+ continue;
+
+ for (type = 0; type < PINCFG_TYPE_NUM; type++)
+ if (widths[type])
+ bank->pm_save[type] = readl(reg + offs[type]);
+
+ if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+ /* Some banks have two config registers */
+ bank->pm_save[PINCFG_TYPE_NUM] =
+ readl(reg + offs[PINCFG_TYPE_FUNC] + 4);
+ pr_debug("Save %s @ %p (con %#010x %08x)\n",
+ bank->name, reg,
+ bank->pm_save[PINCFG_TYPE_FUNC],
+ bank->pm_save[PINCFG_TYPE_NUM]);
+ } else {
+ pr_debug("Save %s @ %p (con %#010x)\n", bank->name,
+ reg, bank->pm_save[PINCFG_TYPE_FUNC]);
+ }
+ }
+
+ if (ctrl->suspend)
+ ctrl->suspend(drvdata);
+}
+
+/**
+ * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device
+ *
+ * Restore the banks that were saved during suspend for this device.
+ *
+ * We don't bother doing anything complicated to avoid glitching lines since
+ * we're called before pad retention is turned off.
+ */
+static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+ void __iomem *virt_base = drvdata->virt_base;
+ int i;
+
+ if (ctrl->resume)
+ ctrl->resume(drvdata);
+
+ for (i = 0; i < ctrl->nr_banks; i++) {
+ struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+ void __iomem *reg = virt_base + bank->pctl_offset;
+
+ u8 *offs = bank->type->reg_offset;
+ u8 *widths = bank->type->fld_width;
+ enum pincfg_type type;
+
+ /* Registers without a powerdown config aren't lost */
+ if (!widths[PINCFG_TYPE_CON_PDN])
+ continue;
+
+ if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+ /* Some banks have two config registers */
+ pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n",
+ bank->name, reg,
+ readl(reg + offs[PINCFG_TYPE_FUNC]),
+ readl(reg + offs[PINCFG_TYPE_FUNC] + 4),
+ bank->pm_save[PINCFG_TYPE_FUNC],
+ bank->pm_save[PINCFG_TYPE_NUM]);
+ writel(bank->pm_save[PINCFG_TYPE_NUM],
+ reg + offs[PINCFG_TYPE_FUNC] + 4);
+ } else {
+ pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name,
+ reg, readl(reg + offs[PINCFG_TYPE_FUNC]),
+ bank->pm_save[PINCFG_TYPE_FUNC]);
+ }
+ for (type = 0; type < PINCFG_TYPE_NUM; type++)
+ if (widths[type])
+ writel(bank->pm_save[type], reg + offs[type]);
+ }
+}
+
+/**
+ * samsung_pinctrl_suspend - save pinctrl state for suspend
+ *
+ * Save data for all banks across all devices.
+ */
+static int samsung_pinctrl_suspend(void)
+{
+ struct samsung_pinctrl_drv_data *drvdata;
+
+ list_for_each_entry(drvdata, &drvdata_list, node) {
+ samsung_pinctrl_suspend_dev(drvdata);
+ }
+
+ return 0;
+}
+
+/**
+ * samsung_pinctrl_resume - restore pinctrl state after suspend
+ *
+ * Restore data for all banks across all devices.
+ */
+static void samsung_pinctrl_resume(void)
+{
+ struct samsung_pinctrl_drv_data *drvdata;
+
+ list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
+ samsung_pinctrl_resume_dev(drvdata);
+ }
+}
+
+#else
+#define samsung_pinctrl_suspend NULL
+#define samsung_pinctrl_resume NULL
+#endif
+
+static struct syscore_ops samsung_pinctrl_syscore_ops = {
+ .suspend = samsung_pinctrl_suspend,
+ .resume = samsung_pinctrl_resume,
+};
+
static const struct of_device_id samsung_pinctrl_dt_match[] = {
#ifdef CONFIG_PINCTRL_EXYNOS
{ .compatible = "samsung,exynos4210-pinctrl",
static int __init samsung_pinctrl_drv_register(void)
{
+ /*
+ * Register syscore ops for save/restore of registers across suspend.
+ * It's important to ensure that this driver is running at an earlier
+ * initcall level than any arch-specific init calls that install syscore
+ * ops that turn off pad retention (like exynos_pm_resume).
+ */
+ register_syscore_ops(&samsung_pinctrl_syscore_ops);
+
return platform_driver_register(&samsung_pinctrl_driver);
}
postcore_initcall(samsung_pinctrl_drv_register);
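As a side note on the ordering the comment above relies on, here is a rough sketch, assuming the standard initcall levels from include/linux/init.h and the fact that syscore callbacks run in registration order on resume (reverse order on suspend); it is illustrative only, not part of the patch:

    /*
     * Initcall levels (include/linux/init.h) run in ascending order:
     *
     *   core_initcall       level 1
     *   postcore_initcall   level 2   <-- samsung_pinctrl_drv_register()
     *   arch_initcall       level 3   <-- typical SoC PM / pad-retention setup
     *   subsys_initcall     level 4
     *
     * syscore ->resume() callbacks run in registration order (and ->suspend()
     * in reverse), so registering here restores the pin registers before a
     * later-registered syscore op can release pad retention.
     */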
* @gpio_chip: GPIO chip of the bank.
* @grange: linux gpio pin range supported by this bank.
* @slock: spinlock protecting bank registers
+ * @pm_save: saved register values during suspend
*/
struct samsung_pin_bank {
struct samsung_pin_bank_type *type;
u32 eint_mask;
u32 eint_offset;
char *name;
+ void *soc_priv;
struct device_node *of_node;
struct samsung_pinctrl_drv_data *drvdata;
struct irq_domain *irq_domain;
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range grange;
spinlock_t slock;
+
+ u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers */
};
/**
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+ void (*suspend)(struct samsung_pinctrl_drv_data *);
+ void (*resume)(struct samsung_pinctrl_drv_data *);
+
char *label;
};
/**
* struct samsung_pinctrl_drv_data: wrapper for holding driver data together.
+ * @node: global list node
* @virt_base: register base address of the controller.
* @dev: device instance representing the controller.
* @irq: interrupt number used by the controller to notify gpio interrupts.
* @nr_function: number of such pin functions.
*/
struct samsung_pinctrl_drv_data {
+ struct list_head node;
void __iomem *virt_base;
struct device *dev;
int irq;
(*map)->data.mux.function = np->name;
if (pcs->is_pinconf) {
- if (pcs_parse_pinconf(pcs, np, function, map))
+ res = pcs_parse_pinconf(pcs, np, function, map);
+ if (res)
goto free_pingroups;
*num_maps = 2;
} else {
}
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
goto gpiochip_error;
+ }
clk_prepare_enable(clk);
return 0;
gpiochip_error:
- ret = gpiochip_remove(pctl->chip);
+ if (gpiochip_remove(pctl->chip))
+ dev_err(&pdev->dev, "failed to remove gpio chip\n");
pinctrl_error:
pinctrl_unregister(pctl->pctl_dev);
return ret;
/* get and remap our register range */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get resource\n");
- return -ENOENT;
- }
xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
};
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
- /* OVC */
- 150, 154,
+ /* PENC */
+ 154,
};
static const unsigned int usb0_mux[] = {
- USB_OVC0_MARK, USB_PENC0_MARK,
+ USB_PENC0_MARK,
+};
+static const unsigned int usb0_ovc_pins[] = {
+ /* USB_OVC */
+ 150
+};
+static const unsigned int usb0_ovc_mux[] = {
+ USB_OVC0_MARK,
};
/* - USB1 ------------------------------------------------------------------- */
static const unsigned int usb1_pins[] = {
- /* OVC */
- 152, 155,
+ /* PENC */
+ 155,
};
static const unsigned int usb1_mux[] = {
- USB_OVC1_MARK, USB_PENC1_MARK,
+ USB_PENC1_MARK,
+};
+static const unsigned int usb1_ovc_pins[] = {
+ /* USB_OVC */
+ 152,
+};
+static const unsigned int usb1_ovc_mux[] = {
+ USB_OVC1_MARK,
};
/* - USB2 ------------------------------------------------------------------- */
static const unsigned int usb2_pins[] = {
- /* OVC, PENC */
- 125, 156,
+ /* PENC */
+ 156,
};
static const unsigned int usb2_mux[] = {
- USB_OVC2_MARK, USB_PENC2_MARK,
+ USB_PENC2_MARK,
+};
+static const unsigned int usb2_ovc_pins[] = {
+ /* USB_OVC */
+ 125,
+};
+static const unsigned int usb2_ovc_mux[] = {
+ USB_OVC2_MARK,
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc),
SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb1_ovc),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb2_ovc),
};
static const char * const du0_groups[] = {
static const char * const usb0_groups[] = {
"usb0",
+ "usb0_ovc",
};
static const char * const usb1_groups[] = {
"usb1",
+ "usb1_ovc",
};
static const char * const usb2_groups[] = {
"usb2",
+ "usb2_ovc",
};
static const struct sh_pfc_function pinmux_functions[] = {
#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
-#define WMT_PIN_WAKEUP1 WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
#define WMT_PIN_SD0CD WMT_PIN(0, 28)
#define WMT_PIN_VDOUT0 WMT_PIN(1, 0)
#define WMT_PIN_VDOUT1 WMT_PIN(1, 1)
return 0;
fail_range:
- err = gpiochip_remove(&data->gpio_chip);
- if (err)
+ if (gpiochip_remove(&data->gpio_chip))
dev_err(&pdev->dev, "failed to remove gpio chip\n");
fail_gpio:
pinctrl_unregister(data->pctl_dev);
}
rfkill_init_sw_state(gps_rfkill,
hp_wmi_get_sw_state(HPWMI_GPS));
- rfkill_set_hw_state(bluetooth_rfkill,
+ rfkill_set_hw_state(gps_rfkill,
hp_wmi_get_hw_state(HPWMI_GPS));
err = rfkill_register(gps_rfkill);
if (err)
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
+ depends on I2C || I2C=n
help
Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
tristate "TI LP8788 charger driver"
depends on MFD_LP8788
depends on LP8788_ADC
+ depends on IIO
help
Say Y to enable support for the LP8788 linear charger.
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
-MODULE_ALIAS("platform:pm2xxx-charger");
+MODULE_ALIAS("i2c:pm2xxx-charger");
MODULE_DESCRIPTION("PM2xxx charger management driver");
struct wm831x_backup *devdata = platform_get_drvdata(pdev);
power_supply_unregister(&devdata->backup);
- kfree(devdata->backup.name);
return 0;
}
chip->caps = ptp_pch_caps;
chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
-
- if (IS_ERR(chip->ptp_clock))
- return PTR_ERR(chip->ptp_clock);
+ if (IS_ERR(chip->ptp_clock)) {
+ ret = PTR_ERR(chip->ptp_clock);
+ goto err_ptp_clock_reg;
+ }
spin_lock_init(&chip->register_lock);
err_req_irq:
ptp_clock_unregister(chip->ptp_clock);
+err_ptp_clock_reg:
iounmap(chip->regs);
chip->regs = NULL;
imx->chip.npwm = 1;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
return PTR_ERR(puv3->clk);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
puv3->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(puv3->base))
return PTR_ERR(puv3->base);
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pwm->mmio_base))
return PTR_ERR(pwm->mmio_base);
pwm->dev = &pdev->dev;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resources defined\n");
- return -ENODEV;
- }
-
pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pwm->mmio_base))
return PTR_ERR(pwm->mmio_base);
pc->chip.npwm = 1;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
pc->chip.npwm = NUM_PWM_CHANNEL;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
mutex_init(&info->pwmss_lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(info->mmio_base))
return PTR_ERR(info->mmio_base);
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
chip->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
If you are unsure about this, say N here.
+choice
+ prompt "Enumeration method"
+ depends on RAPIDIO
+ default RAPIDIO_ENUM_BASIC
+ help
+ There are different enumeration and discovery mechanisms offered
+ for the RapidIO subsystem. You may select a single built-in method
+ or any number of methods to be built as modules.
+ Selecting a built-in method disables use of loadable methods.
+
+ If unsure, select Basic built-in.
+
+config RAPIDIO_ENUM_BASIC
+ tristate "Basic"
+ help
+ This option includes a basic RapidIO fabric enumeration and discovery
+ mechanism similar to the one described in RapidIO specification Annex 1.
+
+endchoice
+
source "drivers/rapidio/switches/Kconfig"
#
# Makefile for RapidIO interconnect services
#
-obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
+obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o
+obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
obj-$(CONFIG_RAPIDIO) += switches/
obj-$(CONFIG_RAPIDIO) += devices/
u32 intval;
u32 ch_inte;
+ /* For MSI mode disable all device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI)
+ iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
dev_int = ioread32(priv->regs + TSI721_DEV_INT);
if (!dev_int)
return IRQ_NONE;
}
}
#endif
+
+ /* For MSI mode re-enable device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI) {
+ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+ }
+
return IRQ_HANDLED;
}
driver_unregister(&rdrv->driver);
}
+void rio_attach_device(struct rio_dev *rdev)
+{
+ rdev->dev.bus = &rio_bus_type;
+ rdev->dev.parent = &rio_bus;
+}
+EXPORT_SYMBOL_GPL(rio_attach_device);
+
/**
* rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
* @dev: the standard device structure to match against
.name = "rapidio",
.match = rio_match_bus,
.dev_attrs = rio_dev_attrs,
+ .bus_attrs = rio_bus_attrs,
.probe = rio_device_probe,
.remove = rio_device_remove,
};
#include "rio.h"
-LIST_HEAD(rio_devices);
-
static void rio_init_em(struct rio_dev *rdev);
-DEFINE_SPINLOCK(rio_global_list_lock);
-
static int next_destid = 0;
static int next_comptag = 1;
return 0;
}
-/**
- * rio_switch_init - Sets switch operations for a particular vendor switch
- * @rdev: RIO device
- * @do_enum: Enumeration/Discovery mode flag
- *
- * Searches the RIO switch ops table for known switch types. If the vid
- * and did match a switch table entry, then call switch initialization
- * routine to setup switch-specific routines.
- */
-static void rio_switch_init(struct rio_dev *rdev, int do_enum)
-{
- struct rio_switch_ops *cur = __start_rio_switch_ops;
- struct rio_switch_ops *end = __end_rio_switch_ops;
-
- while (cur < end) {
- if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
- pr_debug("RIO: calling init routine for %s\n",
- rio_name(rdev));
- cur->init_hook(rdev, do_enum);
- break;
- }
- cur++;
- }
-
- if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
- pr_debug("RIO: adding STD routing ops for %s\n",
- rio_name(rdev));
- rdev->rswitch->add_entry = rio_std_route_add_entry;
- rdev->rswitch->get_entry = rio_std_route_get_entry;
- rdev->rswitch->clr_table = rio_std_route_clr_table;
- }
-
- if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
- printk(KERN_ERR "RIO: missing routing ops for %s\n",
- rio_name(rdev));
-}
-
-/**
- * rio_add_device- Adds a RIO device to the device model
- * @rdev: RIO device
- *
- * Adds the RIO device to the global device list and adds the RIO
- * device to the RIO device list. Creates the generic sysfs nodes
- * for an RIO device.
- */
-static int rio_add_device(struct rio_dev *rdev)
-{
- int err;
-
- err = device_add(&rdev->dev);
- if (err)
- return err;
-
- spin_lock(&rio_global_list_lock);
- list_add_tail(&rdev->global_list, &rio_devices);
- spin_unlock(&rio_global_list_lock);
-
- rio_create_sysfs_dev_files(rdev);
-
- return 0;
-}
-
-/**
- * rio_enable_rx_tx_port - enable input receiver and output transmitter of
- * given port
- * @port: Master port associated with the RIO network
- * @local: local=1 select local port otherwise a far device is reached
- * @destid: Destination ID of the device to check host bit
- * @hopcount: Number of hops to reach the target
- * @port_num: Port (-number on switch) to enable on a far end device
- *
- * Returns 0 or 1 from on General Control Command and Status Register
- * (EXT_PTR+0x3C)
- */
-inline int rio_enable_rx_tx_port(struct rio_mport *port,
- int local, u16 destid,
- u8 hopcount, u8 port_num) {
-#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
- u32 regval;
- u32 ext_ftr_ptr;
-
- /*
- * enable rx input tx output port
- */
- pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
- "%d, port_num = %d)\n", local, destid, hopcount, port_num);
-
- ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
-
- if (local) {
- rio_local_read_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0),
- &regval);
- } else {
- if (rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
- return -EIO;
- }
-
- if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
- /* serial */
- regval = regval | RIO_PORT_N_CTL_EN_RX_SER
- | RIO_PORT_N_CTL_EN_TX_SER;
- } else {
- /* parallel */
- regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
- | RIO_PORT_N_CTL_EN_TX_PAR;
- }
-
- if (local) {
- rio_local_write_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0), regval);
- } else {
- if (rio_mport_write_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
- return -EIO;
- }
-#endif
- return 0;
-}
-
/**
* rio_setup_device- Allocates and sets up a RIO device
* @net: RIO network
rdev->destid);
}
- rdev->dev.bus = &rio_bus_type;
- rdev->dev.parent = &rio_bus;
+ rio_attach_device(rdev);
device_initialize(&rdev->dev);
rdev->dev.release = rio_release_dev;
/**
* rio_enum_mport- Start enumeration through a master port
* @mport: Master port to send transactions
+ * @flags: Enumeration control flags
*
* Starts the enumeration process. If somebody has enumerated our
* master port device, then give up. If not and we have an active
* link, then start recursive peer enumeration. Returns %0 if
* enumeration succeeds or %-EBUSY if enumeration fails.
*/
-int rio_enum_mport(struct rio_mport *mport)
+int rio_enum_mport(struct rio_mport *mport, u32 flags)
{
struct rio_net *net = NULL;
int rc = 0;
printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
mport->name);
+
+ /*
+ * To avoid multiple start requests (repeat enumeration is not supported
+ * by this method), check if enumeration/discovery was already performed
+ * for this mport: if the mport was added into the list of mports for a
+ * net, exit with an error.
+ */
+ if (mport->nnode.next || mport->nnode.prev)
+ return -EBUSY;
+
/* If somebody else enumerated our master port device, bail. */
if (rio_enum_host(mport) < 0) {
printk(KERN_INFO
/**
* rio_disc_mport- Start discovery through a master port
* @mport: Master port to send transactions
+ * @flags: discovery control flags
*
* Starts the discovery process. If we have an active link,
- * then wait for the signal that enumeration is complete.
+ * then wait for the signal that enumeration is complete (if wait
+ * is allowed).
* When enumeration completion is signaled, start recursive
* peer discovery. Returns %0 if discovery succeeds or %-EBUSY
* on failure.
*/
-int rio_disc_mport(struct rio_mport *mport)
+int rio_disc_mport(struct rio_mport *mport, u32 flags)
{
struct rio_net *net = NULL;
unsigned long to_end;
/* If master port has an active link, allocate net and discover peers */
if (rio_mport_is_active(mport)) {
+ if (rio_enum_complete(mport))
+ goto enum_done;
+ else if (flags & RIO_SCAN_ENUM_NO_WAIT)
+ return -EAGAIN;
+
pr_debug("RIO: wait for enumeration to complete...\n");
to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
bail:
return -EBUSY;
}
+
+static struct rio_scan rio_scan_ops = {
+ .enumerate = rio_enum_mport,
+ .discover = rio_disc_mport,
+};
+
+static bool scan;
+module_param(scan, bool, 0);
+MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
+ "(default = 0)");
+
+/**
+ * rio_basic_attach:
+ *
+ * When this enumeration/discovery method is loaded as a module, this function
+ * registers its specific enumeration and discovery routines for all available
+ * RapidIO mport devices. The "scan" command line parameter controls the ability
+ * of the module to start RapidIO enumeration/discovery automatically.
+ *
+ * Returns 0 for success or -EIO if unable to register itself.
+ *
+ * This enumeration/discovery method cannot be unloaded and therefore does not
+ * provide a matching cleanup_module routine.
+ */
+
+static int __init rio_basic_attach(void)
+{
+ if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
+ return -EIO;
+ if (scan)
+ rio_init_mports();
+ return 0;
+}
+
+late_initcall(rio_basic_attach);
+
+MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
+MODULE_LICENSE("GPL");
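The rio_register_scan()/struct rio_scan interface used above is also what any alternative enumeration method would hook into. A minimal sketch of such a method follows; the my_* names are illustrative only and the internal "rio.h" header is assumed to be reachable by the build:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/rio.h>
    #include "rio.h"        /* rio_register_scan(), RIO_MPORT_ANY */

    static int my_enumerate(struct rio_mport *mport, u32 flags)
    {
            /* actively walk the fabric attached to this mport */
            return 0;
    }

    static int my_discover(struct rio_mport *mport, u32 flags)
    {
            /* passively pick up results left by the remote enumerator */
            return 0;
    }

    static struct rio_scan my_scan_ops = {
            .enumerate = my_enumerate,
            .discover  = my_discover,
    };

    static int __init my_scan_attach(void)
    {
            /* attach to every mport that has no method assigned yet */
            return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
    }
    late_initcall(my_scan_attach);

    MODULE_LICENSE("GPL");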
rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
}
}
+
+static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ long val;
+ struct rio_mport *port = NULL;
+ int rc;
+
+ if (kstrtol(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val == RIO_MPORT_ANY) {
+ rc = rio_init_mports();
+ goto exit;
+ }
+
+ if (val < 0 || val >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ port = rio_find_mport((int)val);
+
+ if (!port) {
+ pr_debug("RIO: %s: mport_%d not available\n",
+ __func__, (int)val);
+ return -EINVAL;
+ }
+
+ if (!port->nscan)
+ return -EINVAL;
+
+ if (port->host_deviceid >= 0)
+ rc = port->nscan->enumerate(port, 0);
+ else
+ rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT);
+exit:
+ if (!rc)
+ rc = count;
+
+ return rc;
+}
+
+struct bus_attribute rio_bus_attrs[] = {
+ __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
+ __ATTR_NULL
+};
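For illustration, the new bus attribute can be written from userspace to kick off a scan. A minimal sketch, assuming the attribute appears at /sys/bus/rapidio/scan as created by rio_bus_attrs above (writing -1, i.e. RIO_MPORT_ANY, scans all mports, while a non-negative value scans that mport only):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/bus/rapidio/scan", "w");

            if (!f) {
                    perror("open scan attribute");
                    return 1;
            }
            fputs("-1", f);         /* RIO_MPORT_ANY: scan every mport */
            return fclose(f) ? 1 : 0;
    }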
#include "rio.h"
+static LIST_HEAD(rio_devices);
+static DEFINE_SPINLOCK(rio_global_list_lock);
+
static LIST_HEAD(rio_mports);
+static DEFINE_MUTEX(rio_mport_list_lock);
static unsigned char next_portid;
static DEFINE_SPINLOCK(rio_mmap_lock);
return (RIO_GET_DID(port->sys_size, result));
}
+/**
+ * rio_add_device- Adds a RIO device to the device model
+ * @rdev: RIO device
+ *
+ * Adds the RIO device to the global device list and adds the RIO
+ * device to the RIO device list. Creates the generic sysfs nodes
+ * for an RIO device.
+ */
+int rio_add_device(struct rio_dev *rdev)
+{
+ int err;
+
+ err = device_add(&rdev->dev);
+ if (err)
+ return err;
+
+ spin_lock(&rio_global_list_lock);
+ list_add_tail(&rdev->global_list, &rio_devices);
+ spin_unlock(&rio_global_list_lock);
+
+ rio_create_sysfs_dev_files(rdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_device);
+
/**
* rio_request_inb_mbox - request inbound mailbox service
* @mport: RIO master port from which to allocate the mailbox resource
return ext_ftr_ptr;
}
+EXPORT_SYMBOL_GPL(rio_mport_get_physefb);
/**
* rio_get_comptag - Begin or continue searching for a RIO device by component tag
spin_unlock(&rio_global_list_lock);
return rdev;
}
+EXPORT_SYMBOL_GPL(rio_get_comptag);
/**
* rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
regval);
return 0;
}
+EXPORT_SYMBOL_GPL(rio_set_port_lockout);
+
+/**
+ * rio_switch_init - Sets switch operations for a particular vendor switch
+ * @rdev: RIO device
+ * @do_enum: Enumeration/Discovery mode flag
+ *
+ * Searches the RIO switch ops table for known switch types. If the vid
+ * and did match a switch table entry, then call switch initialization
+ * routine to setup switch-specific routines.
+ */
+void rio_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ struct rio_switch_ops *cur = __start_rio_switch_ops;
+ struct rio_switch_ops *end = __end_rio_switch_ops;
+
+ while (cur < end) {
+ if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
+ pr_debug("RIO: calling init routine for %s\n",
+ rio_name(rdev));
+ cur->init_hook(rdev, do_enum);
+ break;
+ }
+ cur++;
+ }
+
+ if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
+ pr_debug("RIO: adding STD routing ops for %s\n",
+ rio_name(rdev));
+ rdev->rswitch->add_entry = rio_std_route_add_entry;
+ rdev->rswitch->get_entry = rio_std_route_get_entry;
+ rdev->rswitch->clr_table = rio_std_route_clr_table;
+ }
+
+ if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
+ printk(KERN_ERR "RIO: missing routing ops for %s\n",
+ rio_name(rdev));
+}
+EXPORT_SYMBOL_GPL(rio_switch_init);
+
+/**
+ * rio_enable_rx_tx_port - enable input receiver and output transmitter of
+ * given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 select local port otherwise a far device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port (-number on switch) to enable on a far end device
+ *
+ * Returns 0 or 1 from the General Control Command and Status Register
+ * (EXT_PTR+0x3C)
+ */
+int rio_enable_rx_tx_port(struct rio_mport *port,
+ int local, u16 destid,
+ u8 hopcount, u8 port_num)
+{
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+ u32 regval;
+ u32 ext_ftr_ptr;
+
+ /*
+ * enable rx input tx output port
+ */
+ pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
+ "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+ ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+ if (local) {
+ rio_local_read_config_32(port, ext_ftr_ptr +
+ RIO_PORT_N_CTL_CSR(0),
+ &regval);
+ } else {
+ if (rio_mport_read_config_32(port, destid, hopcount,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+ return -EIO;
+ }
+
+ if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+ /* serial */
+ regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+ | RIO_PORT_N_CTL_EN_TX_SER;
+ } else {
+ /* parallel */
+ regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+ | RIO_PORT_N_CTL_EN_TX_PAR;
+ }
+
+ if (local) {
+ rio_local_write_config_32(port, ext_ftr_ptr +
+ RIO_PORT_N_CTL_CSR(0), regval);
+ } else {
+ if (rio_mport_write_config_32(port, destid, hopcount,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+ return -EIO;
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
+
/**
* rio_chk_dev_route - Validate route to the specified device.
return 0;
}
+EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
/**
* rio_chk_dev_access - Validate access to the specified device.
return RIO_GET_BLOCK_ID(reg_val);
}
}
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
/**
* rio_mport_get_feature - query for devices' extended features
return 0;
}
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
/**
* rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+/**
+ * rio_find_mport - find RIO mport by its ID
+ * @mport_id: number (ID) of mport device
+ *
+ * Given a RIO mport number, the desired mport is located
+ * in the global list of mports. If the mport is found, a pointer to its
+ * data structure is returned. If no mport is found, %NULL is returned.
+ */
+struct rio_mport *rio_find_mport(int mport_id)
+{
+ struct rio_mport *port;
+
+ mutex_lock(&rio_mport_list_lock);
+ list_for_each_entry(port, &rio_mports, node) {
+ if (port->id == mport_id)
+ goto found;
+ }
+ port = NULL;
+found:
+ mutex_unlock(&rio_mport_list_lock);
+
+ return port;
+}
+
+/**
+ * rio_register_scan - enumeration/discovery method registration interface
+ * @mport_id: mport device ID for which fabric scan routine has to be set
+ * (RIO_MPORT_ANY = set for all available mports)
+ * @scan_ops: enumeration/discovery control structure
+ *
+ * Assigns an enumeration or discovery method to the specified mport device (or
+ * to all available mports if RIO_MPORT_ANY is specified).
+ * Returns an error if the mport already has an enumerator attached to it.
+ * In the RIO_MPORT_ANY case, ports that already have valid scan routines are
+ * ignored, and an error is returned if no available mport could be found.
+ */
+int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
+{
+ struct rio_mport *port;
+ int rc = -EBUSY;
+
+ mutex_lock(&rio_mport_list_lock);
+ list_for_each_entry(port, &rio_mports, node) {
+ if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+ if (port->nscan && mport_id == RIO_MPORT_ANY)
+ continue;
+ else if (port->nscan)
+ break;
+
+ port->nscan = scan_ops;
+ rc = 0;
+
+ if (mport_id != RIO_MPORT_ANY)
+ break;
+ }
+ }
+ mutex_unlock(&rio_mport_list_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_register_scan);
+
+/**
+ * rio_unregister_scan - removes enumeration/discovery method from mport
+ * @mport_id: mport device ID for which fabric scan routine has to be
+ * unregistered (RIO_MPORT_ANY = set for all available mports)
+ *
+ * Removes the enumeration or discovery method assigned to the specified mport
+ * device (or from all available mports if RIO_MPORT_ANY is specified).
+ */
+int rio_unregister_scan(int mport_id)
+{
+ struct rio_mport *port;
+
+ mutex_lock(&rio_mport_list_lock);
+ list_for_each_entry(port, &rio_mports, node) {
+ if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+ if (port->nscan)
+ port->nscan = NULL;
+ if (mport_id != RIO_MPORT_ANY)
+ break;
+ }
+ }
+ mutex_unlock(&rio_mport_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_unregister_scan);
+
static void rio_fixup_device(struct rio_dev *dev)
{
}
work = container_of(_work, struct rio_disc_work, work);
pr_debug("RIO: discovery work for mport %d %s\n",
work->mport->id, work->mport->name);
- rio_disc_mport(work->mport);
+ work->mport->nscan->discover(work->mport, 0);
}
int rio_init_mports(void)
* First, run enumerations and check if we need to perform discovery
* on any of the registered mports.
*/
+ mutex_lock(&rio_mport_list_lock);
list_for_each_entry(port, &rio_mports, node) {
- if (port->host_deviceid >= 0)
- rio_enum_mport(port);
- else
+ if (port->host_deviceid >= 0) {
+ if (port->nscan)
+ port->nscan->enumerate(port, 0);
+ } else
n++;
}
+ mutex_unlock(&rio_mport_list_lock);
if (!n)
goto no_disc;
}
n = 0;
+ mutex_lock(&rio_mport_list_lock);
list_for_each_entry(port, &rio_mports, node) {
- if (port->host_deviceid < 0) {
+ if (port->host_deviceid < 0 && port->nscan) {
work[n].mport = port;
INIT_WORK(&work[n].work, disc_work_handler);
queue_work(rio_wq, &work[n].work);
n++;
}
}
+ mutex_unlock(&rio_mport_list_lock);
flush_workqueue(rio_wq);
pr_debug("RIO: destroy discovery workqueue\n");
return 0;
}
-device_initcall_sync(rio_init_mports);
-
static int hdids[RIO_MAX_MPORTS + 1];
static int rio_get_hdid(int index)
port->id = next_portid++;
port->host_deviceid = rio_get_hdid(port->id);
+ port->nscan = NULL;
+ mutex_lock(&rio_mport_list_lock);
list_add_tail(&port->node, &rio_mports);
+ mutex_unlock(&rio_mport_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
+EXPORT_SYMBOL_GPL(rio_init_mports);
#include <linux/rio.h>
#define RIO_MAX_CHK_RETRY 3
+#define RIO_MPORT_ANY (-1)
/* Functions internal to the RIO core code */
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
u8 hopcount);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
-extern int rio_enum_mport(struct rio_mport *mport);
-extern int rio_disc_mport(struct rio_mport *mport);
extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
u8 hopcount, u16 table, u16 route_destid,
u8 route_port);
u8 hopcount, u16 table);
extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
+extern int rio_add_device(struct rio_dev *rdev);
+extern void rio_switch_init(struct rio_dev *rdev, int do_enum);
+extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u8 port_num);
+extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
+extern int rio_unregister_scan(int mport_id);
+extern void rio_attach_device(struct rio_dev *rdev);
+extern struct rio_mport *rio_find_mport(int mport_id);
/* Structures internal to the RIO core code */
extern struct device_attribute rio_dev_attrs[];
-extern spinlock_t rio_global_list_lock;
+extern struct bus_attribute rio_bus_attrs[];
extern struct rio_switch_ops __start_rio_switch_ops[];
extern struct rio_switch_ops __end_rio_switch_ops[];
}
/**
- * Balance enable_count of each GPIO and actual GPIO pin control.
+ * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
+ * @rdev: regulator_dev structure
+ * @enable: enable GPIO at initial use?
+ *
* GPIO is enabled in case of initial use. (enable_count is 0)
* GPIO is disabled when it is not shared any more. (enable_count <= 1)
*/
/**
* regulator_set_current_limit - set regulator output current limit
* @regulator: regulator source
- * @min_uA: Minimuum supported current in uA
+ * @min_uA: Minimum supported current in uA
* @max_uA: Maximum supported current in uA
*
* Sets current sink to the desired output current. This can be set during
static int power_state_active_cnt; /* will initialize to zero */
static DEFINE_SPINLOCK(power_state_active_lock);
-int power_state_active_get(void)
-{
- unsigned long flags;
- int cnt;
-
- spin_lock_irqsave(&power_state_active_lock, flags);
- cnt = power_state_active_cnt;
- spin_unlock_irqrestore(&power_state_active_lock, flags);
-
- return cnt;
-}
-
void power_state_active_enable(void)
{
unsigned long flags;
#ifdef CONFIG_REGULATOR_DEBUG
+static int power_state_active_get(void)
+{
+ unsigned long flags;
+ int cnt;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ cnt = power_state_active_cnt;
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+
+ return cnt;
+}
+
static struct ux500_regulator_debug {
struct dentry *dir;
struct dentry *status_file;
break;
}
- if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8))
+ if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8))
ramp_delay_support = true;
if (ramp_delay_support) {
pmic->desc[id].vsel_mask = SMPS10_VSEL;
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
- PALMAS_SMPS10_STATUS);
+ PALMAS_SMPS10_CTRL);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
pmic->desc[id].min_uV = 3750000;
pmic->desc[id].uV_step = 1250000;
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
default y
- depends on !ALWAYS_USE_PERSISTENT_CLOCK
help
If you say yes here, the system time (wall clock) will be set using
the value read from a specified RTC device. This is useful to avoid
config RTC_SYSTOHC
bool "Set the RTC time based on NTP synchronization"
default y
- depends on !ALWAYS_USE_PERSISTENT_CLOCK
help
If you say yes here, the system time (wall clock) will be stored
in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
info->irq, ret);
dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
- if (pdata->rtc_delay) {
+ if (pdata && pdata->rtc_delay) {
info->lp3974_bug_workaround = true;
dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
" RTC updates will be extremely slow.\n");
return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "platform_get_resource failed\n");
- return -ENXIO;
- }
-
nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(nuc900_rtc->rtc_reg))
return PTR_ERR(nuc900_rtc->rtc_reg);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_debug("%s: RTC resource data missing\n", pdev->name);
- return -ENOENT;
- }
-
rtc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rtc_base))
return PTR_ERR(rtc_base);
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
amba_set_drvdata(adev, NULL);
- free_irq(adev->irq[0], ldata->rtc);
+ free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
iounmap(ldata->base);
kfree(ldata);
/* get the memory region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- return -ENOENT;
- }
-
s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(s3c_rtc_base))
return PTR_ERR(s3c_rtc_base);
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Unable to allocate resources for device.\n");
- return -EBUSY;
- }
-
info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->rtc_base))
return PTR_ERR(info->rtc_base);
device->path_data.opm &= ~eventlpm;
device->path_data.ppm &= ~eventlpm;
device->path_data.npm &= ~eventlpm;
- if (oldopm && !device->path_data.opm)
- dasd_generic_last_path_gone(device);
+ if (oldopm && !device->path_data.opm) {
+ dev_warn(&device->cdev->dev,
+ "No verified channel paths remain "
+ "for the device\n");
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "%s", "last verified path gone");
+ dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+ dasd_device_set_stop_bits(device,
+ DASD_STOPPED_DC_WAIT);
+ }
}
if (path_event[chp] & PE_PATH_AVAILABLE) {
device->path_data.opm &= ~eventlpm;
put_disk(xpram_disks[i]);
goto out;
}
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
blk_queue_make_request(xpram_queues[i], xpram_make_request);
blk_queue_logical_block_size(xpram_queues[i], 4096);
}
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
+ &dev_attr_chid.attr,
+ &dev_attr_chid_external.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
u8 chpid;
u32:24;
u8 chpp;
- u32 unused[3];
+ u32 unused[2];
+ u16 chid;
+ u32:16;
u16 mdc;
u16:13;
u8 r:1;
dump_stack();
return;
}
- target_wait_for_sess_cmds(se_sess, 0);
+ target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(sess->se_sess);
transport_deregister_session(sess->se_sess);
static const struct file_operations proc_scsi_fops = {
.open = proc_scsi_host_open,
+ .release = single_release,
.read = seq_read,
.llseek = seq_lseek,
.write = proc_scsi_host_write
}
if (xfer->tx_buf)
- spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
else
spi_writel(as, TDR, 0);
dev_dbg(master->dev.parent,
- " start pio xfer %p: len %u tx %p rx %p\n",
- xfer, xfer->len, xfer->tx_buf, xfer->rx_buf);
+ " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
/* Enable relevant interrupts */
spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
{
u8 *txp;
u8 *rxp;
+ u16 *txp16;
+ u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
if (xfer->rx_buf) {
- rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
- *rxp = spi_readl(as, RDR);
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
+ } else {
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
+ }
} else {
spi_readl(as, RDR);
}
-
- as->current_remaining_bytes--;
+ if (xfer->bits_per_word > 8) {
+ as->current_remaining_bytes -= 2;
+ if (as->current_remaining_bytes < 0)
+ as->current_remaining_bytes = 0;
+ } else {
+ as->current_remaining_bytes--;
+ }
if (as->current_remaining_bytes) {
if (xfer->tx_buf) {
- txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
- spi_writel(as, TDR, *txp);
+ if (xfer->bits_per_word > 8) {
+ txp16 = (u16 *)(((u8 *)xfer->tx_buf)
+ + xfer_pos + 2);
+ spi_writel(as, TDR, *txp16);
+ } else {
+ txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
+ spi_writel(as, TDR, *txp);
+ }
} else {
spi_writel(as, TDR, 0);
}
}
}
+ if (xfer->bits_per_word > 8) {
+ if (xfer->len % 2) {
+ dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
+ return -EINVAL;
+ }
+ }
+
/* FIXME implement these protocol options!! */
- if (xfer->speed_hz) {
- dev_dbg(&spi->dev, "no protocol options yet\n");
+ if (xfer->speed_hz < spi->max_speed_hz) {
+ dev_dbg(&spi->dev, "can't change speed in transfer\n");
return -ENOPROTOOPT;
}
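To make the alignment rule above concrete, a caller requesting bits_per_word = 16 must pass an even byte count. A minimal sketch of such a transfer (the helper name is illustrative; in a real driver the buffer should be DMA-safe rather than on the stack):

    #include <linux/spi/spi.h>

    static int send_one_word16(struct spi_device *spi, u16 word)
    {
            struct spi_transfer t = {
                    .tx_buf        = &word,
                    .len           = 2,     /* length is in bytes, so it must be even */
                    .bits_per_word = 16,
            };
            struct spi_message m;

            spi_message_init(&m);
            spi_message_add_tail(&t, &m);
            return spi_sync(spi, &m);
    }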
},
{ },
};
-MODULE_DEVICE_TABLE(of, davini_spi_of_match);
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
/**
* spi_davinci_get_pdata - Get platform data from DTS binding
tegra_sflash_parse_dt(tsd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "No IO memory resource\n");
- ret = -ENODEV;
- goto exit_free_master;
- }
tsd->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
- spi->cs_gpio = -EINVAL;
+ spi->cs_gpio = -ENOENT;
device_initialize(&spi->dev);
return spi;
}
nb = of_gpio_named_count(np, "cs-gpios");
master->num_chipselect = max(nb, (int)master->num_chipselect);
- if (nb < 1)
+ /* Return error only for an incorrectly formed cs-gpios property */
+ if (nb == 0 || nb == -ENOENT)
return 0;
+ else if (nb < 0)
+ return nb;
cs = devm_kzalloc(&master->dev,
sizeof(int) * master->num_chipselect,
return -ENOMEM;
for (i = 0; i < master->num_chipselect; i++)
- cs[i] = -EINVAL;
+ cs[i] = -ENOENT;
for (i = 0; i < nb; i++)
cs[i] = of_get_named_gpio(np, "cs-gpios", i);
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zram/Kconfig"
-
source "drivers/staging/zsmalloc/Kconfig"
+source "drivers/staging/zram/Kconfig"
+
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
}
rv = alarm_do_ioctl(file, cmd, &ts);
+ if (rv)
+ return rv;
switch (ANDROID_ALARM_BASE_CMD(cmd)) {
case ANDROID_ALARM_GET_TIME(0):
break;
}
- return rv;
+ return 0;
}
#ifdef CONFIG_COMPAT
static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
}
rv = alarm_do_ioctl(file, cmd, &ts);
+ if (rv)
+ return rv;
switch (ANDROID_ALARM_BASE_CMD(cmd)) {
case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
break;
}
- return rv;
+ return 0;
}
#endif
* 'log->buffer' which contains the first entry readable by 'euid'
*/
static size_t get_next_entry_by_uid(struct logger_log *log,
- size_t off, uid_t euid)
+ size_t off, kuid_t euid)
{
while (off != log->w_off) {
struct logger_entry *entry;
entry = get_entry_header(log, off, &scratch);
- if (entry->euid == euid)
+ if (uid_eq(entry->euid, euid))
return off;
next_len = sizeof(struct logger_entry) + entry->len;
__s32 tid;
__s32 sec;
__s32 nsec;
- uid_t euid;
+ kuid_t euid;
char msg[0];
};
config COMEDI_NI_6527
tristate "NI 6527 support"
+ depends on HAS_DMA
select COMEDI_MITE
---help---
Enable support for the National Instruments 6527 PCI card
config COMEDI_NI_65XX
tristate "NI 65xx static dio PCI card support"
+ depends on HAS_DMA
select COMEDI_MITE
---help---
Enable support for National Instruments 65xx static dio boards.
config COMEDI_NI_660X
tristate "NI 660x counter/timer PCI card support"
+ depends on HAS_DMA
select COMEDI_NI_TIOCMD
---help---
Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
config COMEDI_NI_670X
tristate "NI 670x PCI card support"
+ depends on HAS_DMA
select COMEDI_MITE
---help---
Enable support for National Instruments PCI-6703 and PCI-6704
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
+ depends on HAS_DMA
select COMEDI_NI_LABPC
select COMEDI_MITE
---help---
config COMEDI_NI_PCIDIO
tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
+ depends on HAS_DMA
select COMEDI_MITE
select COMEDI_8255
---help---
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
+ depends on HAS_DMA
select COMEDI_NI_TIOCMD
select COMEDI_8255
select COMEDI_FC
called ssv_dnp.
config COMEDI_MITE
+ depends on HAS_DMA
tristate
config COMEDI_NI_TIOCMD
tristate
+ depends on HAS_DMA
select COMEDI_NI_TIO
select COMEDI_MITE
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
+#ifdef CONFIG_HAS_DMA
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
+#endif
} else {
free_page((unsigned long)buf->virt_addr);
}
struct comedi_buf_page *buf;
unsigned i;
+ if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
+ dev_err(dev->class_dev,
+ "dma buffer allocation not supported\n");
+ return;
+ }
+
async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
if (async->buf_page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
for (i = 0; i < n_pages; i++) {
buf = &async->buf_page_list[i];
if (s->async_dma_dir != DMA_NONE)
+#ifdef CONFIG_HAS_DMA
buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
__GFP_COMP);
+#else
+ break;
+#endif
else
buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
if (!buf->virt_addr)
return -EBUSY;
}
- if (!async->prealloc_buf)
- return -EINVAL;
-
/* make sure buffer is an integral number of pages
* (we round up) */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* clear flip-flop to make sure 2-byte registers for
* count and address get set correctly */
clear_dma_ff(devpriv->dma_chan);
- set_dma_addr(devpriv->dma_chan,
- virt_to_bus(devpriv->dma_buffer));
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
/* set appropriate size of transfer */
devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
if (cmd->stop_src == TRIG_COUNT &&
devpriv->count -= num_points;
/* set address and count for next transfer */
- set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer));
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
set_dma_count(devpriv->dma_chan, leftover * sample_size);
release_dma_lock(flags);
unsigned long dma_flags;
devpriv->dma_chan = dma_chan;
+ devpriv->dma_addr =
+ virt_to_bus(devpriv->dma_buffer);
+
dma_flags = claim_dma_lock();
disable_dma(devpriv->dma_chan);
set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
unsigned int divisor_b1;
unsigned int dma_chan; /* dma channel to use */
u16 *dma_buffer; /* buffer ai will dma into */
+ phys_addr_t dma_addr;
/* transfer size in bytes for current transfer */
unsigned int dma_transfer_size;
/* we are using dma/fifo-half-full/etc. */
static int ni_gpct_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
+#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_gpct_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd);
+#endif
static int ni_gpct_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void handle_gpct_interrupt(struct comedi_device *dev,
for (j = 0; j < NUM_GPCT; ++j) {
s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ
- /* | SDF_CMD_WRITE */ ;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
s->n_chan = 3;
if (board->reg_type & ni_reg_m_series_mask)
s->maxdata = 0xffffffff;
s->insn_read = &ni_gpct_insn_read;
s->insn_write = &ni_gpct_insn_write;
s->insn_config = &ni_gpct_insn_config;
+#ifdef PCIDMA
+ s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
s->do_cmd = &ni_gpct_cmd;
s->len_chanlist = 1;
s->do_cmdtest = &ni_gpct_cmdtest;
s->cancel = &ni_gpct_cancel;
s->async_dma_dir = DMA_BIDIRECTIONAL;
+#endif
s->private = &devpriv->counter_dev->counters[j];
devpriv->counter_dev->counters[j].chip_index = 0;
return ni_tio_winsn(counter, insn, data);
}
+#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
int retval;
-#ifdef PCIDMA
struct ni_gpct *counter = s->private;
/* const struct comedi_cmd *cmd = &s->async->cmd; */
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
retval = ni_tio_cmd(counter, s->async);
-#else
- retval = -ENOTSUPP;
-#endif
return retval;
}
+#endif
+#ifdef PCIDMA
static int ni_gpct_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
-#ifdef PCIDMA
struct ni_gpct *counter = s->private;
return ni_tio_cmdtest(counter, cmd);
-#else
return -ENOTSUPP;
-#endif
}
+#endif
static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
tristate "DesignWare USB2 DRD Core Support"
depends on USB
depends on VIRT_TO_BUS
- select USB_OTG_UTILS
help
Say Y or M here if your system has a Dual Role HighSpeed
USB controller based on the DesignWare HSOTG IP Core.
bool "Enable Missed SOF Tracking"
help
Say Y here to enable logging of missed SOF events to the dmesg log.
+ WARNING: This feature is still experimental.
If in doubt, say N.
config USB_DWC2_DEBUG_PERIODIC
/* Set device flags indicating whether the HCD supports DMA */
if (hsotg->core_params->dma_enable > 0) {
- if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
- dev_warn(hsotg->dev,
- "can't enable workaround for >2GB RAM\n");
+ if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+ dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
dev_warn(hsotg->dev,
"can't enable workaround for >2GB RAM\n");
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
-#warning Compiling code to track missed SOFs
-
u16 curr_frame_number = hsotg->frame_number;
if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
hsotg->dev = &dev->dev;
+ /*
+ * Use reasonable defaults so platforms don't have to provide these.
+ */
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ if (!dev->dev.coherent_dma_mask)
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "missing IRQ resource\n");
}
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&dev->dev, "missing memory base resource\n");
- return -EINVAL;
- }
-
hsotg->regs = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(hsotg->regs))
return PTR_ERR(hsotg->regs);
config WIMAX_GDM72XX_USB_PM
bool "Enable power managerment support"
- depends on USB_SUSPEND
+ depends on PM_RUNTIME
endif # WIMAX_GDM72XX_USB
static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
{
struct mxs_lradc *lradc = iio_priv(iio);
- struct iio_buffer *buffer = iio->buffer;
int ret = 0, chan, ofs = 0;
unsigned long enable = 0;
uint32_t ctrl4_set = 0;
uint32_t ctrl1_irq = 0;
const uint32_t chan_value = LRADC_CH_ACCUMULATE |
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS);
+ const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
if (!len)
return -EINVAL;
lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
- for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) {
+ for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
dev_info(&chip->client->dev,
"%s: i2c device found does not match expected id\n",
__func__);
+ ret = -EINVAL;
goto fail1;
}
if (ret) {
dev_err(&clientp->dev,
"%s: irq request failed", __func__);
- goto fail2;
+ goto fail1;
}
}
if (ret) {
dev_err(&clientp->dev,
"%s: iio registration failed\n", __func__);
- goto fail1;
+ goto fail2;
}
dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
return 0;
-fail1:
+fail2:
if (clientp->irq)
free_irq(clientp->irq, indio_dev);
-fail2:
+fail1:
iio_device_free(indio_dev);
return ret;
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
depends on DRM_IMX
+ select VIDEOMODE_HELPERS
config DRM_IMX_TVE
tristate "Support for TV and VGA displays"
depends on DRM_IMX
+ select REGMAP_MMIO
help
Choose this to enable the internal Television Encoder (TVe)
found on i.MX53 processors.
config DRM_IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on DRM_IMX
+ depends on RESET_CONTROLLER
help
Choose this if you have a i.MX5/6 system and want
to use the IPU. This option only enables IPU base
config DRM_IMX_IPUV3
tristate "DRM Support for i.MX IPUv3"
depends on DRM_IMX
+ depends on DRM_IMX_IPUV3_CORE
help
Choose this if you have a i.MX5 or i.MX6 processor.
tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
- regulator_enable(tve->dac_reg);
+ ret = regulator_enable(tve->dac_reg);
+ if (ret)
+ return ret;
}
tve->clk = devm_clk_get(&pdev->dev, "tve");
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
{
- struct drm_pending_vblank_event *e;
- struct timeval now;
unsigned long flags;
struct drm_device *drm = ipu_crtc->base.dev;
spin_lock_irqsave(&drm->event_lock, flags);
-
- e = ipu_crtc->page_flip_event;
- if (!e) {
- spin_unlock_irqrestore(&drm->event_lock, flags);
- return;
- }
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ if (ipu_crtc->page_flip_event)
+ drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
ipu_crtc->page_flip_event = NULL;
-
imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-
- wake_up_interruptible(&e->base.file_priv->event_wait);
-
spin_unlock_irqrestore(&drm->event_lock, flags);
}
config SOLO6X10
tristate "Softlogic 6x10 MPEG codec cards"
depends on PCI && VIDEO_DEV && SND && I2C
+ depends on FONTS
select VIDEOBUF2_DMA_SG
select VIDEOBUF2_DMA_CONTIG
select SND_PCM
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
+/**
+ * nvec_unregister_notifier - Unregister a notifier with nvec
+ * @nvec: A &struct nvec_chip
+ * @nb: The notifier block to unregister
+ *
+ * Unregisters a notifier with @nvec. The notifier will be removed from the
+ * atomic notifier chain.
+ */
+int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
+
/**
* nvec_status_notifier - The final notifier
*
*
* Free the given message
*/
-inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
+void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
if (msg != &nvec->tx_scratch)
dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no mem resource?\n");
- return -ENODEV;
- }
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
return -ENODEV;
}
- i2c_clk = clk_get(&pdev->dev, "div-clk");
+ i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
if (IS_ERR(i2c_clk)) {
dev_err(nvec->dev, "failed to get controller clock\n");
return -ENODEV;
nvec_toggle_global_events(nvec, false);
mfd_remove_devices(nvec->dev);
+ nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
cancel_work_sync(&nvec->rx_work);
cancel_work_sync(&nvec->tx_work);
+ /* FIXME: need to check whether nvec is responsible for power off */
+ pm_power_off = NULL;
return 0;
}
struct notifier_block *nb,
unsigned int events);
-extern int nvec_unregister_notifier(struct device *dev,
- struct notifier_block *nb,
- unsigned int events);
+extern int nvec_unregister_notifier(struct nvec_chip *dev,
+ struct notifier_block *nb);
extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
static int nvec_kbd_remove(struct platform_device *pdev)
{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+ char disable_kbd[] = { NVEC_KBD, DISABLE_KBD },
+ uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING,
+ false };
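+
+ /* Disable keyboard events and wake key reporting before detaching */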
+ nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
+ nvec_write_async(nvec, disable_kbd, 2);
+ nvec_unregister_notifier(nvec, &keys_dev.notifier);
+
input_unregister_device(keys_dev.input);
- input_free_device(keys_dev.input);
return 0;
}
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_DESCRIPTION("NVEC keyboard driver");
+MODULE_ALIAS("platform:nvec-kbd");
MODULE_LICENSE("GPL");
struct nvec_power *power = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&power->poller);
+ nvec_unregister_notifier(power->nvec, &power->notifier);
switch (pdev->id) {
case AC:
power_supply_unregister(&nvec_psy);
struct serio *ser_dev;
char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (ser_dev == NULL)
return -ENOMEM;
static int nvec_mouse_remove(struct platform_device *pdev)
{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE);
+ ps2_stopstreaming(ps2_dev.ser_dev);
+ nvec_unregister_notifier(nvec, &ps2_dev.notifier);
serio_unregister_port(ps2_dev.ser_dev);
return 0;
MODULE_DESCRIPTION("NVEC mouse driver");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_ALIAS("platform:nvec-mouse");
MODULE_LICENSE("GPL");
config DX_SEP
tristate "Discretix SEP driver"
- depends on PCI
+ depends on PCI && CRYPTO
help
Discretix SEP driver; used for the security processor subsystem
on board the Intel Mobile Internet Device and adds SEP availability
unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- regulator_enable(rmi4_data->regulator);
+ retval = regulator_enable(rmi4_data->regulator);
+ if (retval) {
+ dev_err(dev, "Regulator enable failed (%d)\n", retval);
+ return retval;
+ }
enable_irq(rmi4_data->i2c_client->irq);
rmi4_data->touch_stopped = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->apdev->name);
}
- kfree(pDevice->apdev);
+ free_netdev(pDevice->apdev);
pDevice->apdev = NULL;
pDevice->bEnable8021x = false;
pDevice->bEnableHostWEP = false;
return rc;
}
+ spin_lock_irq(&pDevice->lock);
+
if (wrq->disabled) {
pDevice->ePSMode = WMAC_POWER_CAM;
PSvDisablePowerSaving(pDevice);
+ spin_unlock_irq(&pDevice->lock);
return rc;
}
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
pDevice->ePSMode = WMAC_POWER_FAST;
PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
}
+
+ spin_unlock_irq(&pDevice->lock);
+
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
#ifndef _ZCACHE_RAMSTER_H_
#define _ZCACHE_RAMSTER_H_
-#ifdef CONFIG_RAMSTER_MODULE
-#define CONFIG_RAMSTER
-#endif
-
#ifdef CONFIG_RAMSTER
#include "ramster/ramster.h"
#else
#include <linux/atomic.h>
#include "debug.h"
+ssize_t ramster_foreign_eph_pages;
+ssize_t ramster_foreign_pers_pages;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
--- /dev/null
+ RAMSTER HOW-TO
+
+Author: Dan Magenheimer
+Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
+
+This is a HOWTO document for ramster which, as of this writing, is in
+the kernel as a subdirectory of zcache in drivers/staging, called ramster.
+(Zcache can be built with or without ramster functionality.) If enabled
+and properly configured, ramster allows memory capacity load balancing
+across multiple machines in a cluster. Further, the ramster code serves
+as an example of asynchronous access for zcache (as well as cleancache and
+frontswap) that may prove useful for future transcendent memory
+implementations, such as KVM and NVRAM. While ramster works today on
+any network connection that supports kernel sockets, its features may
+become more interesting on future high-speed fabrics/interconnects.
+
+Ramster requires both kernel and userland support. The userland support,
+called ramster-tools, is known to work with EL6-based distros, but is a
+set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
+includes an init file, a config file, and a userland binary that interfaces
+to the kernel. This state of userland support reflects the abysmal userland
+skills of this suitably-embarrassed author; any help/patches to turn
+ramster-tools into more distributable rpms/debs useful for a wider range
+of distros would be appreciated. The source RPM that can be used as a
+starting point is available at:
+ http://oss.oracle.com/projects/tmem/files/RAMster/
+
+As a result of this author's ignorance, the userland setup described in this
+HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies
+if this offends anyone!
+
+Kernel support has only been tested on x86_64. Systems with an active
+ocfs2 filesystem should work, but since ramster leverages a lot of
+code from ocfs2, there may be latent issues. A kernel configuration that
+includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
+if no ocfs2 filesystem is mounted.
+
+This HOWTO demonstrates memory capacity load balancing for a two-node
+cluster, where one node called the "local" node becomes overcommitted
+and the other node called the "remote" node provides additional RAM
+capacity for use by the local node. Ramster is capable of more complex
+topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
+
+If you find any terms in this HOWTO unfamiliar or don't understand the
+motivation for ramster, the following LWN reading is recommended:
+-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
+-- The future calculus of memory management (lwn.net/Articles/475681)
+And since ramster is built on top of zcache, this article may be helpful:
+-- In-kernel memory compression (lwn.net/Articles/545244)
+
+Now that you've memorized the contents of those articles, let's get started!
+
+A. PRELIMINARY
+
+1) Install two x86_64 Linux systems that are known to work when
+ upgraded to a recent upstream Linux kernel version.
+
+On each system:
+
+2) Configure, build and install, then boot Linux, just to ensure it
+ can be done with an unmodified upstream kernel. Confirm you booted
+ the upstream kernel with "uname -a".
+
+3) If you plan to do any performance testing, or to test anything other
+ than swapping, the "WasActive" patch is also highly recommended.
+ (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
+ For a demo or simple testing, the patch can be ignored.
+
+4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems
+ can be found at:
+ http://oss.oracle.com/projects/tmem/files/RAMster/
+ (Sorry but for now, non-EL6 users must recreate ramster-tools on
+ their own from source. See above.)
+
+5) Ensure that debugfs is mounted at each boot. Examples below assume it
+ is mounted at /sys/kernel/debug.
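+
+ To mount debugfs by hand and keep it mounted across reboots, something
+ like the following works on most distros (a minimal sketch; adapt the
+ fstab entry to your distro's conventions):
+
+ # mount -t debugfs none /sys/kernel/debug
+ # echo "debugfs /sys/kernel/debug debugfs defaults 0 0" >> /etc/fstab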
+
+B. BUILDING RAMSTER INTO THE KERNEL
+
+Do the following on each system:
+
+1) Using the kernel configuration mechanism of your choice, change
+ your config to include:
+
+ CONFIG_CLEANCACHE=y
+ CONFIG_FRONTSWAP=y
+ CONFIG_STAGING=y
+ CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
+ CONFIG_ZCACHE=y
+ CONFIG_RAMSTER=y
+
+ For a linux-3.10 or later kernel, you should also set:
+
+ CONFIG_ZCACHE_DEBUG=y
+ CONFIG_RAMSTER_DEBUG=y
+
+ Before building the kernel please doublecheck your kernel config
+ file to ensure all of the settings are correct.
+
+2) Build this kernel and change your boot file (e.g. /etc/grub.conf)
+ so that the new kernel will boot.
+
+3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
+
+4) Reboot each system approximately simultaneously.
+
+5) Check dmesg to ensure there are some messages from ramster, prefixed
+ by "ramster:"
+
+ # dmesg | grep ramster
+
+ You should also see a lot of files in:
+
+ # ls /sys/kernel/debug/zcache
+ # ls /sys/kernel/debug/ramster
+
+ These are mostly counters for various zcache and ramster activities.
+ You should also see files in:
+
+ # ls /sys/kernel/mm/ramster
+
+ These are sysfs files that control ramster as we shall see.
+
+ Ramster will now act as a single-system zcache on each system
+ but doesn't yet know anything about the cluster so can't yet do
+ anything remotely.
+
+C. CONFIGURING THE RAMSTER CLUSTER
+
+This part can be error prone unless you are familiar with clustering
+filesystems. We need to describe the cluster in a /etc/ramster.conf
+file and the init scripts that parse it are extremely picky about
+the syntax.
+
+1) Create a /etc/ramster.conf file and ensure it is identical on both
+ systems. This file mimics the ocfs2 format, and a good amount of
+ documentation can be found by searching for ocfs2.conf, but you can use:
+
+ cluster:
+ name = ramster
+ node_count = 2
+ node:
+ name = system1
+ cluster = ramster
+ number = 0
+ ip_address = my.ip.ad.r1
+ ip_port = 7777
+ node:
+ name = system2
+ cluster = ramster
+ number = 1
+ ip_address = my.ip.ad.r2
+ ip_port = 7777
+
+ You must ensure that the "name" field in the file exactly matches
+ the output of "hostname" on each system; if "hostname" shows a
+ fully-qualified hostname, ensure the name is fully qualified in
+ /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper
+ ip addresses.
+
+2) Enable the ramster service and configure it. If you used the
+ EL6 ramster-tools, this would be:
+
+ # chkconfig --add ramster
+ # service ramster configure
+
+ Set "load on boot" to "y", cluster to start is "ramster" (or whatever
+ name you chose in ramster.conf), heartbeat dead threshold as "500",
+ network idle timeout as "1000000". Leave the others as default.
+
+3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools):
+
+ # service ramster status
+
+ You should see "Checking RAMSTER cluster "ramster": Online". If you do
+ not, something is wrong and ramster will not work. Note that you
+ should also see that the driver for "configfs" is loaded and mounted,
+ the driver for ocfs2_dlmfs is not loaded, and some numbers for network
+ parameters. You will also see "Checking RAMSTER heartbeat: Not active".
+ That's all OK.
+
+4) Now you need to start the cluster heartbeat; the cluster is not "up"
+ until all nodes detect a heartbeat. In a real cluster, heartbeat detection
+ is done via a cluster filesystem, but ramster doesn't require one. Some
+ hack-y kernel code in ramster can start the heartbeat for you though if
+ you tell it what nodes are "up". To enable the heartbeat, do:
+
+ # echo 0 > /sys/kernel/mm/ramster/manual_node_up
+ # echo 1 > /sys/kernel/mm/ramster/manual_node_up
+
+ This must be done on BOTH nodes and, to avoid timeouts, must be done
+ approximately concurrently on both nodes. On an EL6 system, it is
+ convenient to put these lines in /etc/rc.local. To confirm that the
+ cluster is now up, on both systems do:
+
+ # dmesg | grep ramster
+
+ You should see ramster "Accepted connection" messages in dmesg on both
+ nodes after this. Note that if you check userland status again with
+
+ # service ramster status
+
+ you will still see "Checking RAMSTER heartbeat: Not active". That's
+ still OK... the ramster kernel heartbeat hack doesn't communicate to
+ userland.
+
+5) You now must tell each node the node to which it should "remotify" pages.
+ On this two node cluster, we will assume the "local" node, node 0, has
+ memory overcommitted and will use ramster to utilize RAM capacity on
+ the "remote node", node 1. To configure this, on node 0, you do:
+
+ # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+ You should see "ramster: node 1 set as remotification target" in dmesg
+ on node 0. Again, on EL6, /etc/rc.local is a good place to put this
+ on node 0 so you don't forget to do it at each boot.
+
+6) One more step: By default, the ramster code does not "remotify" any
+ pages; this is primarily for testing purposes, but sometimes it is
+ useful. This may change in the future, but for now, on node 0, you do:
+
+ # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+ # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
+
+ The first enables remotifying swap (persistent, aka frontswap) pages,
+ the second enables remotifying of page cache (ephemeral, cleancache)
+ pages.
+
+ On EL6, these lines can also be put in /etc/rc.local (AFTER the
+ node_up lines), or at the beginning of a script that runs a workload.
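+
+ Putting the pieces together, /etc/rc.local on node 0 of the two-node
+ example above might end with something like this sketch (node 1 would
+ carry only the two manual_node_up lines):
+
+ echo 0 > /sys/kernel/mm/ramster/manual_node_up
+ echo 1 > /sys/kernel/mm/ramster/manual_node_up
+ echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+ echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+ echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable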
+
+7) Note that most testing has been done with both/all machines booted
+ roughly simultaneously to avoid cluster timeouts. Ideally, you should
+ do this too unless you are trying to break ramster rather than just
+ use it. ;-)
+
+D. TESTING RAMSTER
+
+1) Note that ramster has no value unless pages get "remotified". For
+ swap/frontswap/persistent pages, this doesn't happen unless/until
+ the workload would cause swapping to occur, at which point pages
+ are put into frontswap/zcache, and the remotification thread starts
+ working. To get to the point where the system swaps, you either
+ need a workload for which the working set exceeds the RAM in the
+ system; or you need to somehow reduce the amount of RAM one of
+ the system sees. This latter is easy when testing in a VM, but
+ harder on physical systems. In some cases, "mem=xxxM" on the
+ kernel command line restricts memory, but for some values of xxx
+ the kernel may fail to boot. One may also try creating a fixed
+ RAMdisk, doing nothing with it, but ensuring that it eats up a fixed
+ amount of RAM.
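+
+ As one concrete (hypothetical) way to do the RAMdisk trick, the brd
+ module can pin down a fixed amount of RAM once the ramdisk has been
+ written; the size below (rd_size is in KB, so 1GB here) is just an
+ example:
+
+ # modprobe brd rd_nr=1 rd_size=1048576
+ # dd if=/dev/zero of=/dev/ram0 bs=1M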
+
+2) To see if ramster is working, on the "remote node", node 1, try:
+
+ # grep . /sys/kernel/debug/ramster/foreign_*
+ # # note, that is space-dot-space between grep and the pathname
+
+ to monitor the number (and max) of ephemeral and persistent pages
+ that ramster has sent. If these stay at zero, ramster is not working
+ either because the workload on the local node (node 0) isn't creating
+ enough memory pressure or because "remotifying" isn't working. On the
+ local system, node 0, you can watch lots of useful information also.
+ Try:
+
+ grep . /sys/kernel/debug/zcache/*pageframes* \
+ /sys/kernel/debug/zcache/*zbytes* \
+ /sys/kernel/debug/zcache/*zpages* \
+ /sys/kernel/debug/ramster/*remote*
+
+ Of particular note are the remote_*_pages_succ_get counters. These
+ show how many disk reads and/or disk writes have been avoided on the
+ overcommitted local system by storing pages remotely using ramster.
+
+ At the risk of information overload, you can also grep:
+
+ /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
+
+ These show, for example, how many disk reads and/or disk writes have
+ been avoided by using zcache to optimize RAM on the local system.
+
+
+AUTOMATIC SWAP REPATRIATION
+
+You may notice that while the systems are idle, the foreign persistent
+page count on the remote machine slowly decreases. This is because
+ramster implements "frontswap selfshrinking": When possible, swap
+pages that have been remotified are slowly repatriated to the local
+machine. This is so that local RAM can be used when possible and
+so that, in case of remote machine crash, the probability of loss
+of data is reduced.
+
+REBOOTING / POWEROFF
+
+If a system is shut down while some of its swap pages still reside
+on a remote system, the system may lock up during the shutdown
+sequence. This will occur if the network is shut down before the
+swap mechanism is shut down, which is the default ordering on many
+distros. To avoid this annoying problem, simply shut off the swap
+subsystem before starting the shutdown sequence, e.g.:
+
+ # swapoff -a
+ # reboot
+
+Ideally, this swapoff-before-ifdown ordering should be enforced permanently
+using shutdown scripts.
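+
+One crude way to enforce this on an EL6-style SysV init system is a tiny
+kill-script that runs before the network scripts are stopped (a sketch;
+the script name and the K01 priority are assumptions):
+
+ /etc/init.d/ramster-swapoff:
+ #!/bin/sh
+ [ "$1" = "stop" ] && swapoff -a
+
+ # chmod +x /etc/init.d/ramster-swapoff
+ # ln -s ../init.d/ramster-swapoff /etc/rc0.d/K01ramster-swapoff
+ # ln -s ../init.d/ramster-swapoff /etc/rc6.d/K01ramster-swapoff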
+
+KNOWN PROBLEMS
+
+1) You may periodically see messages such as:
+
+ ramster_r2net, message length problem
+
+ This is harmless but indicates that a node is sending messages
+ containing compressed pages that exceed the maximum for zcache
+ (PAGE_SIZE*15/16). The sender side needs to be fixed.
+
+2) If you see a "No longer connected to node..." message or a "No connection
+ established with node X after N seconds", it is possible you may
+ be in an unrecoverable state. If you are certain all of the
+ appropriate cluster configuration steps described above have been
+ performed, try rebooting the two servers concurrently to see if
+ the cluster starts.
+
+ Note that "Connection to node... shutdown, state 7" is an intermediate
+ connection state. As long as you later see "Accepted connection", the
+ intermediate states are harmless.
+
+3) There are known issues in counting certain values. As a result
+ you may see periodic warnings from the kernel. Almost always you
+ will see "ramster: bad accounting for XXX". There are also "WARN_ONCE"
+ messages. If you see kernel warnings with a tombstone, please report
+ them. They are harmless but reflect bugs that need to be eventually fixed.
+
+ADVANCED RAMSTER TOPOLOGIES
+
+The kernel code for ramster can support up to eight nodes in a cluster,
+but no testing has been done with more than three nodes.
+
+In the example described above, the "remote" node serves as a RAM
+overflow for the "local" node. This can be made symmetric by appropriate
+settings of the sysfs remote_target_nodenum file. For example, by setting:
+
+ # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 0, and
+
+ # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, each node can serve as a RAM overflow for the other.
+
+For more than two nodes, a "RAM server" can be configured. For a
+three node system, set:
+
+ # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, and
+
+ # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 2. Then node 0 is a RAM server for node 1 and node 2.
+
+In this implementation of ramster, any remote node is potentially a single
+point of failure (SPOF). Though the probability of failure is reduced
+by automatic swap repatriation (see above), a proposed future enhancement
+to ramster improves high-availability for the cluster by sending a copy
+of each page of data to two other nodes. Patches welcome!
/* Used by this code. */
long ramster_flnodes;
-ssize_t ramster_foreign_eph_pages;
-ssize_t ramster_foreign_pers_pages;
/* FIXME frontswap selfshrinking knobs in debugfs? */
static LIST_HEAD(ramster_rem_op_list);
inc_ramster_foreign_eph_pages();
} else {
dec_ramster_foreign_eph_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
+#endif
}
} else {
if (count > 0) {
inc_ramster_foreign_pers_pages();
} else {
dec_ramster_foreign_pers_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
+#endif
}
}
}
#ifdef CONFIG_ZCACHE_MODULE
#ifdef CONFIG_RAMSTER
-module_param(ramster_enabled, int, S_IRUGO);
+module_param(ramster_enabled, bool, S_IRUGO);
module_param(disable_frontswap_selfshrink, int, S_IRUGO);
#endif
-module_param(disable_cleancache, int, S_IRUGO);
-module_param(disable_frontswap, int, S_IRUGO);
+module_param(disable_cleancache, bool, S_IRUGO);
+module_param(disable_frontswap, bool, S_IRUGO);
#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
#endif
-module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO);
+module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
module_param(zcache_comp_name, charp, S_IRUGO);
module_init(zcache_init);
MODULE_LICENSE("GPL");
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
return -1;
}
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
return -1;
}
static void iscsit_do_crypto_hash_buf(
struct hash_desc *hash,
- unsigned char *buf,
+ const void *buf,
u32 payload_length,
u32 padding,
u8 *pad_bytes,
return 0;
out:
if (cmd)
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
ping_out:
kfree(ping_data);
return ret;
if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
pr_err("Received logout request on connection that"
" is not in logged in state, ignoring request.\n");
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
return 0;
}
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32C HeaderDigest to"
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32C HeaderDigest to"
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
- u32 iov_count = 0, tx_size = 0;
- struct iscsi_reject *hdr;
+ struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
struct kvec *iov;
+ u32 iov_count = 0, tx_size;
- iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
+ iscsit_build_reject(cmd, conn, hdr);
iov = &cmd->iov_misc[0];
iov[iov_count].iov_base = cmd->pdu;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&cmd->data_crc);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
iscsit_mod_nopin_response_timer(conn);
iscsit_increment_maxcmdsn(cmd, sess);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
/*
* CmdSN is greater than the tail of the list.
*/
- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
list_add_tail(&ooo_cmdsn->ooo_list,
&sess->sess_ooo_cmdsn_list);
else {
*/
list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
ooo_list) {
- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
continue;
+ /* Insert before this entry */
list_add(&ooo_cmdsn->ooo_list,
- &ooo_tmp->ooo_list);
+ ooo_tmp->ooo_list.prev);
break;
}
}
list_del(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
list_del(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
/*
* Extra parameters for ISER from RFC-5046
*/
- param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
+ param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
if (!param)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, OFMARKINT)) {
SET_PSTATE_NEGOTIATE(param);
- } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+ } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
if (iser == true)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, OFMARKINT))
param->state &= ~PSTATE_NEGOTIATE;
- else if (!strcmp(param->name, RDMAEXTENTIONS))
+ else if (!strcmp(param->name, RDMAEXTENSIONS))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
param->state &= ~PSTATE_NEGOTIATE;
}
INIT_LIST_HEAD(&extra_response->er_list);
- strncpy(extra_response->key, key, strlen(key) + 1);
- strncpy(extra_response->value, NOTUNDERSTOOD,
- strlen(NOTUNDERSTOOD) + 1);
+ strlcpy(extra_response->key, key, sizeof(extra_response->key));
+ strlcpy(extra_response->value, NOTUNDERSTOOD,
+ sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
¶m_list->extra_response_list);
if (phase & PHASE_SECURITY) {
if (iscsi_check_for_auth_key(key) > 0) {
- char *tmpptr = key + strlen(key);
- *tmpptr = '=';
kfree(tmpbuf);
return 1;
}
ops->SessionType = !strcmp(param->value, DISCOVERY);
pr_debug("SessionType: %s\n",
param->value);
- } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+ } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
ops->RDMAExtensions = !strcmp(param->value, YES);
pr_debug("RDMAExtensions: %s\n",
param->value);
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <scsi/iscsi_proto.h>
+
struct iscsi_extra_response {
- char key[64];
+ char key[KEY_MAXLEN];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
/*
* Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
*/
-#define RDMAEXTENTIONS "RDMAExtensions"
+#define RDMAEXTENSIONS "RDMAExtensions"
#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
/*
* Initial values for iSER parameters following RFC-5046 Section 6
*/
-#define INITIAL_RDMAEXTENTIONS NO
+#define INITIAL_RDMAEXTENSIONS NO
#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
- struct iscsi_conn *conn = cmd->conn;
-
- iscsit_free_r2ts_from_list(cmd);
- iscsit_free_all_datain_reqs(cmd);
-
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
kfree(cmd->iov_data);
- if (conn) {
+ kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+ bool check_queues)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ if (scsi_cmd) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
+ iscsit_free_r2ts_from_list(cmd);
+ }
+ if (cmd->data_direction == DMA_FROM_DEVICE)
+ iscsit_free_all_datain_reqs(cmd);
+ }
+
+ if (conn && check_queues) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
-
- kmem_cache_free(lio_cmd_cache, cmd);
}
-void iscsit_free_cmd(struct iscsi_cmd *cmd)
+void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
+ struct se_cmd *se_cmd = NULL;
+ int rc;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- if (cmd->data_direction == DMA_TO_DEVICE)
- iscsit_stop_dataout_timer(cmd);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, true, shutdown);
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- transport_generic_free_cmd(&cmd->se_cmd, 1);
+ rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+ if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
break;
case ISCSI_OP_REJECT:
/*
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
- transport_generic_free_cmd(&cmd->se_cmd, 1);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, true, shutdown);
+
+ rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
break;
}
/* Fall-through */
default:
+ __iscsit_free_cmd(cmd, false, shutdown);
cmd->release_cmd(cmd);
break;
}
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
.store = target_core_store_dev_udev_path,
};
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+ struct se_device *dev = p;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
static ssize_t target_core_store_dev_enable(
void *p,
const char *page,
static struct target_core_configfs_attribute target_core_attr_dev_enable = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "enable",
- .ca_mode = S_IWUSR },
- .show = NULL,
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_enable,
.store = target_core_store_dev_enable,
};
struct se_dev_entry *deve = se_cmd->se_deve;
deve->total_cmds++;
- deve->total_bytes += se_cmd->data_length;
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
deve->read_bytes += se_cmd->data_length;
- deve->deve_cmds++;
-
se_lun = deve->se_lun;
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
return 0;
}
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
- struct se_dev_entry *deve;
- unsigned long flags;
-
- spin_lock_irqsave(&se_nacl->device_list_lock, flags);
- deve = se_nacl->device_list[se_cmd->orig_fe_lun];
- deve->deve_cmds--;
- spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
void core_update_device_list_access(
u32 mapped_lun,
u32 lun_access,
struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size;
- dev->dev_attrib.hw_block_size =
- bdev_logical_block_size(inode->i_bdev);
- dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
goto fail;
}
- dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
-
+ fd_dev->fd_block_size = FD_BLOCKSIZE;
/*
* Limit UNMAP emulation to 8k Number of LBAs (NoLB)
*/
dev->dev_attrib.max_write_same_len = 0x1000;
}
- fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
-
+ dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
* to handle underlying block_device resize operations.
*/
if (S_ISBLK(i->i_mode))
- dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+ dev_size = i_size_read(i);
else
dev_size = fd_dev->fd_dev_size;
- return div_u64(dev_size, dev->dev_attrib.block_size);
+ return div_u64(dev_size - dev->dev_attrib.block_size,
+ dev->dev_attrib.block_size);
}
static struct sbc_ops fd_sbc_ops = {
rw = WRITE_FUA;
else if (!(q->flush_flags & REQ_FLUSH))
rw = WRITE_FUA;
+ else
+ rw = WRITE;
} else {
rw = WRITE;
}
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
-void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *);
u32 src_len;
u64 tmp;
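+
+ /* In nullio mode, complete the command without touching the ramdisk pages */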
+ if (dev->rd_flags & RDF_NULLIO) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
}
enum {
- Opt_rd_pages, Opt_err
+ Opt_rd_pages, Opt_rd_nullio, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
+ {Opt_rd_nullio, "rd_nullio=%d"},
{Opt_err, NULL}
};
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
+ case Opt_rd_nullio:
+ match_int(args, &arg);
+ if (arg != 1)
+ break;
+
+ pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+ rd_dev->rd_flags |= RDF_NULLIO;
+ break;
default:
break;
}
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
- " SG_table_count: %u\n", rd_dev->rd_page_count,
- PAGE_SIZE, rd_dev->sg_table_count);
+ " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+ PAGE_SIZE, rd_dev->sg_table_count,
+ !!(rd_dev->rd_flags & RDF_NULLIO));
return bl;
}
} ____cacheline_aligned;
#define RDF_HAS_PAGE_COUNT 0x01
+#define RDF_NULLIO 0x02
struct rd_dev {
struct se_device dev;
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
-static void transport_put_cmd(struct se_cmd *cmd);
+static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
* This routine unconditionally frees a command, and reference counting
* or list removal must be done in the caller.
*/
-static void transport_release_cmd(struct se_cmd *cmd)
+static int transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
*/
- if (cmd->check_release != 0) {
- target_put_sess_cmd(cmd->se_sess, cmd);
- return;
- }
+ if (cmd->check_release != 0)
+ return target_put_sess_cmd(cmd->se_sess, cmd);
+
cmd->se_tfo->release_cmd(cmd);
+ return 1;
}
/**
*
* This routine releases our reference to the command and frees it if possible.
*/
-static void transport_put_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
{
unsigned long flags;
if (atomic_read(&cmd->t_fe_count) &&
!atomic_dec_and_test(&cmd->t_fe_count)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return 0;
}
if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_free_pages(cmd);
- transport_release_cmd(cmd);
- return;
+ return transport_release_cmd(cmd);
}
void *transport_kmap_data_sg(struct se_cmd *cmd)
}
}
-void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ int ret = 0;
+
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
transport_wait_for_tasks(cmd);
- transport_release_cmd(cmd);
+ ret = transport_release_cmd(cmd);
} else {
if (wait_for_tasks)
transport_wait_for_tasks(cmd);
- core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
-
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
}
+ return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
- unsigned long flags;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
complete(&se_cmd->cmd_wait_comp);
return;
}
list_del(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
se_cmd->se_tfo->release_cmd(se_cmd);
}
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
- return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+ return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+ &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- WARN_ON(se_sess->sess_tearing_down);
+ if (se_sess->sess_tearing_down) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return;
+ }
se_sess->sess_tearing_down = 1;
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
se_cmd->cmd_wait_set = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
- * @wait_for_tasks: Make extra transport_wait_for_tasks call
*/
-void target_wait_for_sess_cmds(
- struct se_session *se_sess,
- int wait_for_tasks)
+void target_wait_for_sess_cmds(struct se_session *se_sess)
{
struct se_cmd *se_cmd, *tmp_cmd;
- bool rc = false;
+ unsigned long flags;
list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_cmd_list, se_cmd_list) {
+ &se_sess->sess_wait_list, se_cmd_list) {
list_del(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
- if (wait_for_tasks) {
- pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- rc = transport_wait_for_tasks(se_cmd);
-
- pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
- }
-
- if (!rc) {
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
- }
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
se_cmd->se_tfo->release_cmd(se_cmd);
}
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENODEV;
- }
-
priv->sensor = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENODEV;
- }
-
priv->control = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->control))
return PTR_ERR(priv->control);
return PTR_ERR(priv->sensor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENODEV;
- }
priv->control = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->control))
return PTR_ERR(priv->control);
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!data->mem) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENOENT;
- }
-
data->base = devm_ioremap_resource(&pdev->dev, data->mem);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
*/
static void __exit ehv_bc_exit(void)
{
+ platform_driver_unregister(&ehv_bc_tty_driver);
tty_unregister_driver(ehv_bc_driver);
put_tty_driver(ehv_bc_driver);
kfree(bcs);
if (ip->type == PORT_16550A)
me->fifo[p] = 1;
- opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
- opmode &= OP_MODE_MASK;
+ if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) {
+ opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
+ opmode &= OP_MODE_MASK;
+ } else {
+ opmode = RS232_MODE;
+ }
me->iftype[p] = opmode;
mutex_unlock(&port->mutex);
}
int shiftbit;
unsigned char val, mask;
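+
+ /* The op mode ioctls are only meaningful on MU860-based chips */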
+ if (info->board->chip_flag != MOXA_MUST_MU860_HWID)
+ return -EFAULT;
+
p = tty->index % 4;
if (cmd == MOXA_SET_OP_MODE) {
if (get_user(opmode, (int __user *) argp))
ldata->real_raw = 0;
}
n_tty_set_room(tty);
+ /*
+ * Fix tty hang when I_IXON(tty) is cleared, but the tty
+ * been stopped by STOP_CHAR(tty) before it.
+ */
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+ start_tty(tty);
+ }
+
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
AIOP_INTR_BIT_3
};
+#ifdef CONFIG_PCI
static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
UPCI_AIOP_INTR_BIT_0,
UPCI_AIOP_INTR_BIT_1,
UPCI_AIOP_INTR_BIT_2,
UPCI_AIOP_INTR_BIT_3
};
+#endif
static Byte_t RData[RDATASIZE] = {
0x00, 0x09, 0xf6, 0x82,
static int __init init_ISA(int i);
static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
static void rp_flush_buffer(struct tty_struct *tty);
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
static void rp_start(struct tty_struct *tty);
static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
- ByteIO_t * AiopIOList, int AiopIOListSize,
- WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
- int PeriodicOnly, int altChanRingIndicator,
- int UPCIRingInd);
static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
ByteIO_t * AiopIOList, int AiopIOListSize,
int IRQNum, Byte_t Frequency, int PeriodicOnly);
};
MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
+/* Resets the speaker controller on RocketModem II and III devices */
+static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
+{
+ ByteIO_t addr;
+
+ /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
+ if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
+ addr = CtlP->AiopIO[0] + 0x4F;
+ sOutB(addr, 0);
+ }
+
+ /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
+ if ((model == MODEL_UPCI_RM3_8PORT)
+ || (model == MODEL_UPCI_RM3_4PORT)) {
+ addr = CtlP->AiopIO[0] + 0x88;
+ sOutB(addr, 0);
+ }
+}
+
+/***************************************************************************
+Function: sPCIInitController
+Purpose: Initialization of controller global registers and controller
+ structure.
+Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
+ IRQNum,Frequency,PeriodicOnly)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int CtlNum; Controller number
+ ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
+ This list must be in the order the AIOPs will be found on the
+ controller. Once an AIOP in the list is not found, it is
+ assumed that there are no more AIOPs on the controller.
+ int AiopIOListSize; Number of addresses in AiopIOList
+ int IRQNum; Interrupt Request number. Can be any of the following:
+ 0: Disable global interrupts
+ 3: IRQ 3
+ 4: IRQ 4
+ 5: IRQ 5
+ 9: IRQ 9
+ 10: IRQ 10
+ 11: IRQ 11
+ 12: IRQ 12
+ 15: IRQ 15
+ Byte_t Frequency: A flag identifying the frequency
+ of the periodic interrupt, can be any one of the following:
+ FREQ_DIS - periodic interrupt disabled
+ FREQ_137HZ - 137 Hertz
+ FREQ_69HZ - 69 Hertz
+ FREQ_34HZ - 34 Hertz
+ FREQ_17HZ - 17 Hertz
+ FREQ_9HZ - 9 Hertz
+ FREQ_4HZ - 4 Hertz
+ If IRQNum is set to 0 the Frequency parameter is
+ overidden, it is forced to a value of FREQ_DIS.
+ int PeriodicOnly: 1 if all interrupts except the periodic
+ interrupt are to be blocked.
+ 0 is both the periodic interrupt and
+ other channel interrupts are allowed.
+ If IRQNum is set to 0 the PeriodicOnly parameter is
+ overidden, it is forced to a value of 0.
+Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
+ initialization failed.
+
+Comments:
+ If periodic interrupts are to be disabled but AIOP interrupts
+ are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
+
+ If interrupts are to be completely disabled set IRQNum to 0.
+
+ Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
+ invalid combination.
+
+ This function performs initialization of global interrupt modes,
+ but it does not actually enable global interrupts. To enable
+ and disable global interrupts use functions sEnGlobalInt() and
+ sDisGlobalInt(). Enabling of global interrupts is normally not
+ done until all other initializations are complete.
+
+ Even if interrupts are globally enabled, they must also be
+ individually enabled for each channel that is to generate
+ interrupts.
+
+Warnings: No range checking on any of the parameters is done.
+
+ No context switches are allowed while executing this function.
+
+ After this function all AIOPs on the controller are disabled,
+ they can be enabled with sEnAiop().
+*/
+static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
+ ByteIO_t * AiopIOList, int AiopIOListSize,
+ WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
+ int PeriodicOnly, int altChanRingIndicator,
+ int UPCIRingInd)
+{
+ int i;
+ ByteIO_t io;
+
+ CtlP->AltChanRingIndicator = altChanRingIndicator;
+ CtlP->UPCIRingInd = UPCIRingInd;
+ CtlP->CtlNum = CtlNum;
+ CtlP->CtlID = CTLID_0001; /* controller release 1 */
+ CtlP->BusType = isPCI; /* controller release 1 */
+
+ if (ConfigIO) {
+ CtlP->isUPCI = 1;
+ CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
+ CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
+ CtlP->AiopIntrBits = upci_aiop_intr_bits;
+ } else {
+ CtlP->isUPCI = 0;
+ CtlP->PCIIO =
+ (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
+ CtlP->AiopIntrBits = aiop_intr_bits;
+ }
+
+ sPCIControllerEOI(CtlP); /* clear EOI if warm init */
+ /* Init AIOPs */
+ CtlP->NumAiop = 0;
+ for (i = 0; i < AiopIOListSize; i++) {
+ io = AiopIOList[i];
+ CtlP->AiopIO[i] = (WordIO_t) io;
+ CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
+
+ CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
+ if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
+ break; /* done looking for AIOPs */
+
+ CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
+ sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
+ sOutB(io + _INDX_DATA, sClockPrescale);
+ CtlP->NumAiop++; /* bump count of AIOPs */
+ }
+
+ if (CtlP->NumAiop == 0)
+ return (-1);
+ else
+ return (CtlP->NumAiop);
+}
+
/*
* Called when a PCI card is found. Retrieves and stores model information,
* init's aiopic and serial port hardware.
return (CtlP->NumAiop);
}
-#ifdef CONFIG_PCI
-/***************************************************************************
-Function: sPCIInitController
-Purpose: Initialization of controller global registers and controller
- structure.
-Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
- IRQNum,Frequency,PeriodicOnly)
- CONTROLLER_T *CtlP; Ptr to controller structure
- int CtlNum; Controller number
- ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
- This list must be in the order the AIOPs will be found on the
- controller. Once an AIOP in the list is not found, it is
- assumed that there are no more AIOPs on the controller.
- int AiopIOListSize; Number of addresses in AiopIOList
- int IRQNum; Interrupt Request number. Can be any of the following:
- 0: Disable global interrupts
- 3: IRQ 3
- 4: IRQ 4
- 5: IRQ 5
- 9: IRQ 9
- 10: IRQ 10
- 11: IRQ 11
- 12: IRQ 12
- 15: IRQ 15
- Byte_t Frequency: A flag identifying the frequency
- of the periodic interrupt, can be any one of the following:
- FREQ_DIS - periodic interrupt disabled
- FREQ_137HZ - 137 Hertz
- FREQ_69HZ - 69 Hertz
- FREQ_34HZ - 34 Hertz
- FREQ_17HZ - 17 Hertz
- FREQ_9HZ - 9 Hertz
- FREQ_4HZ - 4 Hertz
- If IRQNum is set to 0 the Frequency parameter is
- overidden, it is forced to a value of FREQ_DIS.
- int PeriodicOnly: 1 if all interrupts except the periodic
- interrupt are to be blocked.
- 0 is both the periodic interrupt and
- other channel interrupts are allowed.
- If IRQNum is set to 0 the PeriodicOnly parameter is
- overidden, it is forced to a value of 0.
-Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
- initialization failed.
-
-Comments:
- If periodic interrupts are to be disabled but AIOP interrupts
- are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
-
- If interrupts are to be completely disabled set IRQNum to 0.
-
- Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
- invalid combination.
-
- This function performs initialization of global interrupt modes,
- but it does not actually enable global interrupts. To enable
- and disable global interrupts use functions sEnGlobalInt() and
- sDisGlobalInt(). Enabling of global interrupts is normally not
- done until all other initializations are complete.
-
- Even if interrupts are globally enabled, they must also be
- individually enabled for each channel that is to generate
- interrupts.
-
-Warnings: No range checking on any of the parameters is done.
-
- No context switches are allowed while executing this function.
-
- After this function all AIOPs on the controller are disabled,
- they can be enabled with sEnAiop().
-*/
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
- ByteIO_t * AiopIOList, int AiopIOListSize,
- WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
- int PeriodicOnly, int altChanRingIndicator,
- int UPCIRingInd)
-{
- int i;
- ByteIO_t io;
-
- CtlP->AltChanRingIndicator = altChanRingIndicator;
- CtlP->UPCIRingInd = UPCIRingInd;
- CtlP->CtlNum = CtlNum;
- CtlP->CtlID = CTLID_0001; /* controller release 1 */
- CtlP->BusType = isPCI; /* controller release 1 */
-
- if (ConfigIO) {
- CtlP->isUPCI = 1;
- CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
- CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
- CtlP->AiopIntrBits = upci_aiop_intr_bits;
- } else {
- CtlP->isUPCI = 0;
- CtlP->PCIIO =
- (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
- CtlP->AiopIntrBits = aiop_intr_bits;
- }
-
- sPCIControllerEOI(CtlP); /* clear EOI if warm init */
- /* Init AIOPs */
- CtlP->NumAiop = 0;
- for (i = 0; i < AiopIOListSize; i++) {
- io = AiopIOList[i];
- CtlP->AiopIO[i] = (WordIO_t) io;
- CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
-
- CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
- if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
- break; /* done looking for AIOPs */
-
- CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
- sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
- sOutB(io + _INDX_DATA, sClockPrescale);
- CtlP->NumAiop++; /* bump count of AIOPs */
- }
-
- if (CtlP->NumAiop == 0)
- return (-1);
- else
- return (CtlP->NumAiop);
-}
-
-/* Resets the speaker controller on RocketModem II and III devices */
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
-{
- ByteIO_t addr;
-
- /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
- if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
- addr = CtlP->AiopIO[0] + 0x4F;
- sOutB(addr, 0);
- }
-
- /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
- if ((model == MODEL_UPCI_RM3_8PORT)
- || (model == MODEL_UPCI_RM3_4PORT)) {
- addr = CtlP->AiopIO[0] + 0x88;
- sOutB(addr, 0);
- }
-}
-#endif
-
/***************************************************************************
Function: sReadAiopID
Purpose: Read the AIOP idenfication number directly from an AIOP.
if (nr_uarts > UART_NR)
nr_uarts = UART_NR;
- for (i = 0; i < UART_NR; i++) {
+ for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
struct uart_port *port = &up->port;
* if so, search for the first available port that does have
* console support.
*/
- if (co->index >= UART_NR)
+ if (co->index >= nr_uarts)
co->index = 0;
port = &serial8250_ports[co->index].port;
if (!port->iobase && !port->membase)
int line;
struct uart_port *port;
- for (line = 0; line < UART_NR; line++) {
+ for (line = 0; line < nr_uarts; line++) {
port = &serial8250_ports[line].port;
if (uart_match_port(p, port))
return line;
{
int i;
- for (i = 0; i < UART_NR; i++) {
+ for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
if (up->port.dev == &dev->dev)
/*
* First, find a port entry which matches.
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (uart_match_port(&serial8250_ports[i].port, port))
return &serial8250_ports[i];
* free entry. We look for one which hasn't been previously
* used (indicated by zero iobase).
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
serial8250_ports[i].port.iobase == 0)
return &serial8250_ports[i];
* That also failed. Last resort is to find any entry which
* doesn't have a real port associated with it.
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN)
return &serial8250_ports[i];
{
struct dw8250_data *data = dev_get_drvdata(dev);
- clk_disable_unprepare(data->clk);
+ if (!IS_ERR(data->clk))
+ clk_disable_unprepare(data->clk);
return 0;
}
{
struct dw8250_data *data = dev_get_drvdata(dev);
- clk_prepare_enable(data->clk);
+ if (!IS_ERR(data->clk))
+ clk_prepare_enable(data->clk);
return 0;
}
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT33C4", 0 },
{ "INT33C5", 0 },
+ { "80860F0A", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
- if (plat->dma_rx_poll_enable) {
+ if (plat && plat->dma_rx_poll_enable) {
/* Set poll rate if specified. */
if (plat->dma_rx_poll_rate) {
uap->dmarx.auto_poll_rate = false;
temp = readl(sport->port.membase + UCR2);
temp |= (UCR2_RXEN | UCR2_TXEN);
+ if (!sport->have_rtscts)
+ temp |= UCR2_IRTS;
writel(temp, sport->port.membase + UCR2);
if (USE_IRDA(sport)) {
if (rc)
return rc;
rc = platform_driver_register(&mcf_platform_driver);
- if (rc)
+ if (rc) {
+ uart_unregister_driver(&mcf_driver);
return rc;
+ }
return 0;
}
if (psc_ops && psc_ops->fifoc_init) {
ret = psc_ops->fifoc_init();
if (ret)
- return ret;
+ goto err_init;
}
ret = platform_driver_register(&mpc52xx_uart_of_driver);
if (ret) {
printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
__FILE__, ret);
- uart_unregister_driver(&mpc52xx_uart_driver);
- return ret;
+ goto err_reg;
}
return 0;
+err_reg:
+ if (psc_ops && psc_ops->fifoc_uninit)
+ psc_ops->fifoc_uninit();
+err_init:
+ uart_unregister_driver(&mpc52xx_uart_driver);
+ return ret;
}
static void __exit
dcr_write(up->dcr_host, UART_IER, up->ier);
/* free irq */
- free_irq(up->port.irq, port);
+ free_irq(up->port.irq, up);
}
static int nwpserial_verify_port(struct uart_port *port,
return pdata->get_context_loss_count(up->dev);
}
-static void serial_omap_set_forceidle(struct uart_omap_port *up)
-{
- struct omap_uart_port_info *pdata = up->dev->platform_data;
-
- if (!pdata || !pdata->set_forceidle)
- return;
-
- pdata->set_forceidle(up->dev);
-}
-
-static void serial_omap_set_noidle(struct uart_omap_port *up)
-{
- struct omap_uart_port_info *pdata = up->dev->platform_data;
-
- if (!pdata || !pdata->set_noidle)
- return;
-
- pdata->set_noidle(up->dev);
-}
-
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = up->dev->platform_data;
serial_out(up, UART_IER, up->ier);
}
- serial_omap_set_forceidle(up);
-
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
pm_runtime_get_sync(up->dev);
serial_omap_enable_ier_thri(up);
- serial_omap_set_noidle(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
ourport->tx_irq = ret;
ourport->clk = clk_get(&platdev->dev, "uart");
+ if (IS_ERR(ourport->clk)) {
+ pr_err("%s: Controller clock not found\n",
+ dev_name(&platdev->dev));
+ return PTR_ERR(ourport->clk);
+ }
+
+ ret = clk_prepare_enable(ourport->clk);
+ if (ret) {
+ pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+ clk_put(ourport->clk);
+ return ret;
+ }
/* Keep all interrupts masked and cleared */
if (s3c24xx_serial_has_interrupt_mask(port)) {
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
+ clk_disable_unprepare(ourport->clk);
return 0;
}
static void __exit s3c24xx_serial_modexit(void)
{
+ platform_driver_unregister(&samsung_serial_driver);
uart_unregister_driver(&s3c24xx_uart_drv);
}
con_set_default_unimap(vc);
vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf) {
- tty_port_destroy(&vc->port);
kfree(vc);
vc_cons[currcons].d = NULL;
return -ENOMEM;
return ret;
}
-void vc_deallocate(unsigned int currcons)
+struct vc_data *vc_deallocate(unsigned int currcons)
{
+ struct vc_data *vc = NULL;
+
WARN_CONSOLE_UNLOCKED();
if (vc_cons_allocated(currcons)) {
- struct vc_data *vc = vc_cons[currcons].d;
- struct vt_notifier_param param = { .vc = vc };
+ struct vt_notifier_param param;
+ param.vc = vc = vc_cons[currcons].d;
 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
vcs_remove_sysfs(currcons);
vc->vc_sw->con_deinit(vc);
put_pid(vc->vt_pid);
module_put(vc->vc_sw->owner);
kfree(vc->vc_screenbuf);
- if (currcons >= MIN_NR_CONSOLES) {
- tty_port_destroy(&vc->port);
- kfree(vc);
- }
vc_cons[currcons].d = NULL;
}
+ return vc;
}
/*
return 0;
}
+/* deallocate a single console, if possible (leave 0) */
+static int vt_disallocate(unsigned int vc_num)
+{
+ struct vc_data *vc = NULL;
+ int ret = 0;
+
+ if (!vc_num)
+ return 0;
+
+ console_lock();
+ if (VT_BUSY(vc_num))
+ ret = -EBUSY;
+ else
+ vc = vc_deallocate(vc_num);
+ console_unlock();
+
+ if (vc && vc_num >= MIN_NR_CONSOLES) {
+ tty_port_destroy(&vc->port);
+ kfree(vc);
+ }
+
+ return ret;
+}
+
+/* deallocate all unused consoles, but leave 0 */
+static void vt_disallocate_all(void)
+{
+ struct vc_data *vc[MAX_NR_CONSOLES];
+ int i;
+
+ console_lock();
+ for (i = 1; i < MAX_NR_CONSOLES; i++)
+ if (!VT_BUSY(i))
+ vc[i] = vc_deallocate(i);
+ else
+ vc[i] = NULL;
+ console_unlock();
+
+ for (i = 1; i < MAX_NR_CONSOLES; i++) {
+ if (vc[i] && i >= MIN_NR_CONSOLES) {
+ tty_port_destroy(&vc[i]->port);
+ kfree(vc[i]);
+ }
+ }
+}
/*
ret = -ENXIO;
break;
}
- if (arg == 0) {
- /* deallocate all unused consoles, but leave 0 */
- console_lock();
- for (i=1; i<MAX_NR_CONSOLES; i++)
- if (! VT_BUSY(i))
- vc_deallocate(i);
- console_unlock();
- } else {
- /* deallocate a single console, if possible */
- arg--;
- if (VT_BUSY(arg))
- ret = -EBUSY;
- else if (arg) { /* leave 0 */
- console_lock();
- vc_deallocate(arg);
- console_unlock();
- }
- }
+ if (arg == 0)
+ vt_disallocate_all();
+ else
+ ret = vt_disallocate(--arg);
break;
case VT_RESIZE:
config UIO_DMEM_GENIRQ
tristate "Userspace platform driver with generic irq and dynamic memory"
+ depends on HAS_DMA
help
Platform driver for Userspace I/O devices, including generic
interrupt handling code. Shared interrupts are not supported.
{
int ret, len;
__le32 *buf;
- int offb, offd;
+ int offb;
+ unsigned int offd;
const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
config USB_CHIPIDEA_HOST
bool "ChipIdea host controller"
depends on USB=y || USB=USB_CHIPIDEA
- depends on USB_EHCI_HCD
+ depends on USB_EHCI_HCD=y
select USB_EHCI_ROOT_HUB_TT
help
Say Y here to enable host controller functionality of the
ci13xxx_imx_platdata.phy = data->phy;
- if (!pdev->dev.dma_mask) {
- pdev->dev.dma_mask = devm_kzalloc(&pdev->dev,
- sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
- if (!pdev->dev.dma_mask) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "Failed to alloc dma_mask!\n");
- goto err;
- }
- *pdev->dev.dma_mask = DMA_BIT_MASK(32);
- dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask);
- }
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (usbmisc_ops && usbmisc_ops->init) {
ret = usbmisc_ops->init(&pdev->dev);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing resource\n");
- return -ENODEV;
- }
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
config USB_OTG
bool "OTG support"
- depends on USB_SUSPEND
+ depends on PM_RUNTIME
default n
help
The most notable feature of USB OTG is support for a
goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
- /* arbitrary limit,
- * sufficient for USB 2.0 high-bandwidth iso */
- if (isopkt[u].length > 8192) {
+ /*
+ * arbitrary limit needed for USB 3.0
+ * bMaxBurst (0~15 allowed, 1~16 packets)
+ * bmAttributes (bit 1:0, mult 0~2, 1~3 packets)
+ * sizemax: 1024 * 16 * 3 = 49152
+ */
+ if (isopkt[u].length > 49152) {
ret = -EINVAL;
goto error;
}
/* Edirol SD-20 */
{ USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
config USB_DWC3_HOST
bool "Host only mode"
- depends on USB
+ depends on USB=y || USB=USB_DWC3
help
Select this when you want to use DWC3 in host mode only,
 in which case the gadget feature will be disabled.
config USB_DWC3_GADGET
bool "Gadget only mode"
- depends on USB_GADGET
+ depends on USB_GADGET=y || USB_GADGET=USB_DWC3
help
Select this when you want to use DWC3 in gadget mode only,
 in which case the host feature will be disabled.
config USB_DWC3_DUAL_ROLE
bool "Dual Role mode"
- depends on (USB && USB_GADGET)
+ depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
help
 This is the default working mode of the DWC3 controller, where
both host and gadget features are enabled.
return 0;
}
-static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32);
-
static int dwc3_exynos_probe(struct platform_device *pdev)
{
struct dwc3_exynos *exynos;
* Once we move to full device tree support this will vanish off.
*/
if (!dev->dma_mask)
- dev->dma_mask = &dwc3_exynos_dma_mask;
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
platform_set_drvdata(pdev, exynos);
{
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
+ device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
platform_device_unregister(exynos->usb2_phy);
platform_device_unregister(exynos->usb3_phy);
- device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
clk_disable_unprepare(exynos->clk);
{
struct dwc3_pci *glue = pci_get_drvdata(pci);
+ platform_device_unregister(glue->dwc3);
platform_device_unregister(glue->usb2_phy);
platform_device_unregister(glue->usb3_phy);
- platform_device_unregister(glue->dwc3);
pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
}
dep = dwc->eps[epnum];
if (!dep)
continue;
-
- dwc3_free_trb_pool(dep);
-
- if (epnum != 0 && epnum != 1)
+ /*
+ * Physical endpoints 0 and 1 are special; they form the
+ * bi-directional USB endpoint 0.
+ *
+ * For those two physical endpoints, we don't allocate a TRB
+ * pool nor do we add them to the endpoints list. Due to that, we
+ * shouldn't do these two operations here; otherwise we would end up
+ * with all sorts of bugs when removing dwc3.ko.
+ */
+ if (epnum != 0 && epnum != 1) {
+ dwc3_free_trb_pool(dep);
list_del(&dep->endpoint.ep_list);
+ }
kfree(dep);
}
depends on ARCH_LPC32XX
depends on USB_PHY
select USB_ISP1301
- select USB_OTG_UTILS
help
This option selects the USB device controller in the LPC32xx SoC.
err_get_hclk:
clk_put(pclk);
- platform_set_drvdata(pdev, NULL);
-
return ret;
}
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "error finding USBD resource\n");
- return -ENXIO;
- }
-
udc->usbd_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(udc->usbd_regs))
return PTR_ERR(udc->usbd_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(dev, "error finding IUDMA resource\n");
- return -ENXIO;
- }
-
udc->iudma_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(udc->iudma_regs))
return PTR_ERR(udc->iudma_regs);
usb_del_gadget_udc(&udc->gadget);
BUG_ON(udc->driver);
- platform_set_drvdata(pdev, NULL);
bcm63xx_uninit_udc_hw(udc);
return 0;
gi->gstrings[i] = NULL;
s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
USB_GADGET_FIRST_AVAIL_IDX);
- if (IS_ERR(s))
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
goto err_comp_cleanup;
+ }
gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
}
cfg->gstrings[i] = NULL;
s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1);
- if (IS_ERR(s))
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
goto err_comp_cleanup;
+ }
c->iConfiguration = s[0].id;
}
struct dummy *dum = platform_get_drvdata(pdev);
usb_del_gadget_udc(&dum->gadget);
- platform_set_drvdata(pdev, NULL);
device_remove_file(&dum->gadget.dev, &dev_attr_function);
return 0;
}
}
for (i = 0; i < mod_data.num; i++) {
dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
- if (!dum[i])
+ if (!dum[i]) {
+ retval = -ENOMEM;
goto err_add_pdata;
+ }
retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
sizeof(void *));
if (retval)
* @c: the configuration to support the network link
 * @ethaddr: a buffer in which the ethernet address of the host
* side of the link was recorded
+ * @dev: eth_dev structure
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
* @c: the configuration to support the network link
 * @ethaddr: a buffer in which the ethernet address of the host
* side of the link was recorded
+ * @dev: eth_dev structure
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
{
struct snd_card *card = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (card)
return snd_card_free(card);
fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
GFP_KERNEL);
- if (fusb300->ep0_req == NULL)
+ if (fusb300->ep0_req == NULL) {
+ ret = -ENOMEM;
goto clean_up3;
+ }
init_controller(fusb300);
ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
if (pdata->exit)
pdata->exit(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
m66592->epaddr2ep[0] = &m66592->ep[0];
m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
- if (m66592->ep0_req == NULL)
+ if (m66592->ep0_req == NULL) {
+ ret = -ENOMEM;
goto clean_up3;
+ }
m66592->ep0_req->complete = nop_completion;
init_controller(m66592);
dev->transceiver = NULL;
}
- platform_set_drvdata(pdev, NULL);
the_controller = NULL;
return 0;
}
r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
GFP_KERNEL);
- if (r8a66597->ep0_req == NULL)
+ if (r8a66597->ep0_req == NULL) {
+ ret = -ENOMEM;
goto clean_up3;
+ }
r8a66597->ep0_req->complete = nop_completion;
ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
if (hs_req->req.length == 0)
return;
- usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in);
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
}
/**
irq = gpio_to_irq(udc_info->vbus_pin);
if (irq < 0) {
dev_err(dev, "no irq for gpio vbus pin\n");
+ retval = irq;
goto err_gpio_claim;
}
iounmap(base_addr);
release_mem_region(rsrc_start, rsrc_len);
- platform_set_drvdata(pdev, NULL);
-
if (!IS_ERR(udc_clock) && udc_clock != NULL) {
clk_disable(udc_clock);
clk_put(udc_clock);
ss_opts->bulk_buflen = gzero_options.bulk_buflen;
func_ss = usb_get_function(func_inst_ss);
- if (IS_ERR(func_ss))
+ if (IS_ERR(func_ss)) {
+ status = PTR_ERR(func_ss);
goto err_put_func_inst_ss;
+ }
func_inst_lb = usb_get_function_instance("Loopback");
- if (IS_ERR(func_inst_lb))
+ if (IS_ERR(func_inst_lb)) {
+ status = PTR_ERR(func_inst_lb);
goto err_put_func_ss;
+ }
lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
lb_opts->bulk_buflen = gzero_options.bulk_buflen;
config USB_UHCI_HCD
tristate "UHCI HCD (most Intel and VIA) support"
- depends on PCI || SPARC_LEON || ARCH_VT8500
+ depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
---help---
The Universal Host Controller Interface is a standard by Intel for
accessing the USB hardware in the PC (which is also called the USB
config USB_UHCI_SUPPORT_NON_PCI_HC
bool
- depends on USB_UHCI_HCD
- default y if (SPARC_LEON || ARCH_VT8500)
+ default y if (SPARC_LEON || USB_UHCI_PLATFORM)
config USB_UHCI_PLATFORM
- bool "Generic UHCI Platform Driver support"
- depends on USB_UHCI_SUPPORT_NON_PCI_HC
+ bool
default y if ARCH_VT8500
- ---help---
- Enable support for generic UHCI platform devices that require no
- additional configuration.
config USB_UHCI_BIG_ENDIAN_MMIO
bool
- depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
- default y
+ default y if SPARC_LEON
config USB_UHCI_BIG_ENDIAN_DESC
bool
- depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
- default y
+ default y if SPARC_LEON
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
/*-------------------------------------------------------------------------*/
-static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32);
-
static int ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &at91_ehci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
#define PLATFORM_DRIVER ehci_hcd_sead3_driver
#endif
-#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
- !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
- !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
- !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
- !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \
- !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \
- !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \
- !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \
- !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \
- !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \
- !defined(PLATFORM_DRIVER) && \
- !defined(PS3_SYSTEM_BUS_DRIVER) && \
- !defined(OF_PLATFORM_DRIVER) && \
- !defined(XILINX_OF_PLATFORM_DRIVER)
-#error "missing bus glue for ehci-hcd"
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
.extra_priv_size = sizeof(struct omap_hcd),
};
-static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32);
-
/**
* ehci_hcd_omap_probe - initialize TI-based HCDs
*
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &omap_ehci_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
}
}
-static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);
-
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
* now. Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &ehci_orion_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
dev_err(dev, "can't request ehci vbus gpio %d", gpio);
}
-static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32);
-
static int s5p_ehci_probe(struct platform_device *pdev)
{
struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
* Once we move to full device tree support this will vanish off.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &ehci_s5p_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (IS_ERR(phy)) {
/* Fallback to pdata */
if (!pdata) {
+ usb_put_hcd(hcd);
dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
return -EPROBE_DEFER;
} else {
}
static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
+ if (rc == 0) {
+ qh_refresh(ehci, qh);
+ qh_link_periodic(ehci, qh);
+ }
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
*
* FIXME kill the now-dysfunctional queued urbs
*/
- if (rc != 0)
+ else {
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
+ }
}
/* maybe turn off periodic schedule */
static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
ehci_spear_drv_resume);
-static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32);
-
static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd ;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &spear_ehci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
writel(val, base + TEGRA_USB_PORTSC1);
}
-static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
-
static int tegra_ehci_probe(struct platform_device *pdev)
{
struct resource *res;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
setup_vbus_gpio(pdev, pdata);
int retval = 1;
unsigned long flags;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ /* if !PM_RUNTIME, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
pr_warning("isp1760: IRQ resource not available\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto cleanup;
}
+
irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
if (priv) {
MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
-static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32);
-
static int ohci_at91_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &at91_ohci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
.start_port_reset = ohci_start_port_reset,
};
-static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32);
-
static int exynos_ohci_probe(struct platform_device *pdev)
{
struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
* Once we move to full device tree support this will vanish off.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &ohci_exynos_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
urb->start_frame = frame;
}
} else if (ed->type == PIPE_ISOCHRONOUS) {
- u16 next = ohci_frame_no(ohci) + 2;
+ u16 next = ohci_frame_no(ohci) + 1;
u16 frame = ed->last_iso + ed->interval;
/* Behind the scheduling threshold? */
if (unlikely(tick_before(frame, next))) {
/* USB_ISO_ASAP: Round up to the first available slot */
- if (urb->transfer_flags & URB_ISO_ASAP)
+ if (urb->transfer_flags & URB_ISO_ASAP) {
frame += (next - frame + ed->interval - 1) &
-ed->interval;
* Not ASAP: Use the next slot in the stream. If
* the entire URB falls before the threshold, fail.
*/
- else if (tick_before(frame + ed->interval *
+ } else {
+ if (tick_before(frame + ed->interval *
(urb->number_of_packets - 1), next)) {
- retval = -EXDEV;
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- goto fail;
- }
+ retval = -EXDEV;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ goto fail;
+ }
- /*
- * Some OHCI hardware doesn't handle late TDs
- * correctly. After retiring them it proceeds to
- * the next ED instead of the next TD. Therefore
- * we have to omit the late TDs entirely.
- */
- urb_priv->td_cnt = DIV_ROUND_UP(next - frame,
- ed->interval);
+ /*
+ * Some OHCI hardware doesn't handle late TDs
+ * correctly. After retiring them it proceeds
+ * to the next ED instead of the next TD.
+ * Therefore we have to omit the late TDs
+ * entirely.
+ */
+ urb_priv->td_cnt = DIV_ROUND_UP(
+ (u16) (next - frame),
+ ed->interval);
+ }
}
urb->start_frame = frame;
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
if (!isp1301_i2c_client) {
- ret = -EPROBE_DEFER;
- goto out;
+ return -EPROBE_DEFER;
}
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (usb_disabled()) {
dev_err(&pdev->dev, "USB is disabled\n");
ret = -ENODEV;
- goto out;
+ goto fail_disable;
}
/* Enable AHB slave USB clock, needed for further USB clock control */
if (IS_ERR(usb_pll_clk)) {
dev_err(&pdev->dev, "failed to acquire USB PLL\n");
ret = PTR_ERR(usb_pll_clk);
- goto out1;
+ goto fail_pll;
}
ret = clk_enable(usb_pll_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB PLL\n");
- goto out2;
+ goto fail_pllen;
}
ret = clk_set_rate(usb_pll_clk, 48000);
if (ret < 0) {
dev_err(&pdev->dev, "failed to set USB clock rate\n");
- goto out3;
+ goto fail_rate;
}
/* Enable USB device clock */
if (IS_ERR(usb_dev_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_dev_clk);
- goto out4;
+ goto fail_dev;
}
ret = clk_enable(usb_dev_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto out5;
+ goto fail_deven;
}
/* Enable USB otg clocks */
if (IS_ERR(usb_otg_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_otg_clk);
- goto out6;
+ goto fail_otg;
}
__raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
ret = clk_enable(usb_otg_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto out7;
+ goto fail_otgen;
}
isp1301_configure();
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
- goto out8;
+ goto fail_hcd;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get MEM resource\n");
- ret = -ENOMEM;
- goto out8;
- }
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
- goto out8;
+ goto fail_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto out8;
+ goto fail_resource;
}
nxp_start_hc();
return ret;
nxp_stop_hc();
-out8:
+fail_resource:
usb_put_hcd(hcd);
-out7:
+fail_hcd:
clk_disable(usb_otg_clk);
-out6:
+fail_otgen:
clk_put(usb_otg_clk);
-out5:
+fail_otg:
clk_disable(usb_dev_clk);
-out4:
+fail_deven:
clk_put(usb_dev_clk);
-out3:
+fail_dev:
+fail_rate:
clk_disable(usb_pll_clk);
-out2:
+fail_pllen:
clk_put(usb_pll_clk);
-out1:
+fail_pll:
+fail_disable:
isp1301_i2c_client = NULL;
-out:
return ret;
}
/*-------------------------------------------------------------------------*/
-static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32);
-
/*
* configure so an HC device and id are always provided
* always called with process context; sleeping is OK
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &omap_ohci_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
dev_name(dev));
MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
-static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32);
-
static int ohci_pxa_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pxa_ohci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
.start_port_reset = ohci_start_port_reset,
};
-static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32);
-
static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
{
const struct hc_driver *driver = &ohci_spear_hc_driver;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &spear_ohci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
int ports, i, retval = 1;
unsigned long flags;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ /* if !PM_RUNTIME, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
* and usb-storage.
*
* TODO:
- * - usb suspend/resume triggered by sl811 (with USB_SUSPEND)
+ * - usb suspend/resume triggered by sl811 (with PM_RUNTIME)
* - various issues noted in the code
* - performance work; use both register banks; ...
* - use urb->iso_frame_desc[] with ISO transfers
/* auto-stop if nothing connected for 1 second */
if (any_ports_active(uhci))
uhci->rh_state = UHCI_RH_RUNNING;
- else if (time_after_eq(jiffies, uhci->auto_stop_time))
+ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+ !uhci->wait_for_hp)
suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
break;
.hub_control = uhci_hub_control,
};
-static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32);
-
static int uhci_hcd_platform_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
* Once we have dma capability bindings this can go away.
*/
if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &platform_uhci_dma_mask;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
pdev->name);
return -EINVAL; /* Can't change the period */
} else {
- next = uhci->frame_number + 2;
+ next = uhci->frame_number + 1;
/* Find the next unused frame */
if (list_empty(&qh->queue)) {
ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
/* Set the max packet size and max burst */
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
- max_packet = usb_endpoint_maxp(&ep->desc);
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
- max_packet = ep->ss_ep_comp.bMaxBurst;
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
+ max_burst = ep->ss_ep_comp.bMaxBurst;
break;
case USB_SPEED_HIGH:
+ /* Some devices get this wrong */
+ if (usb_endpoint_xfer_bulk(&ep->desc))
+ max_packet = 512;
/* bits 11:12 specify the number of additional transaction
* opportunities per microframe (USB 2.0, section 9.6.6)
*/
usb_endpoint_xfer_int(&ep->desc)) {
max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
}
- /* Fall through */
+ break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
break;
default:
BUG();
}
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
+ MAX_BURST(max_burst));
max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
}
spin_unlock_irqrestore(&xhci->lock, flags);
+ if (!xhci->rh_bw)
+ goto no_bw;
+
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
}
}
+no_bw:
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
u32 page_size, temp;
int i;
+ INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
for (i = 0; i < 16; i++) {
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- INIT_LIST_HEAD(&xhci->cancel_cmd_list);
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
-
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ /*
+ * Systems with the TI redriver that loses port status change events
+ * need to have the registers polled during D3, so avoid D3cold.
+ */
+ if (xhci_compliance_mode_recovery_timer_quirk_check())
+ pdev->no_d3cold = true;
return xhci_suspend(xhci);
}
* Systems:
* Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
*/
-static bool compliance_mode_recovery_timer_quirk_check(void)
+bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
const char *dmi_product_name, *dmi_sys_vendor;
xhci_dbg(xhci, "Finished xhci_init\n");
/* Initializing Compliance Mode Recovery Data If Needed */
- if (compliance_mode_recovery_timer_quirk_check()) {
+ if (xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
}
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
+ bool comp_timer_running = false;
/* Wait a bit if either of the roothubs need to settle from the
* transition into bus suspend.
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
+
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ !(xhci_all_ports_seen_u0(xhci))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+ }
+
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
retval = xhci_init(hcd->primary_hcd);
if (retval)
return retval;
+ comp_timer_running = true;
+
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
if (!retval) {
* to suffer the Compliance Mode issue again. It doesn't matter if
 * the ports had previously entered U0 before the system was suspended.
*/
- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
/* Re-enable port polling. */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+/* xHCI quirks */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void);
+
#endif /* __LINUX_XHCI_HCD_H */
if (!config) {
dev_err(&pdev->dev,
"failed to allocate musb hdrc config\n");
+ ret = -ENOMEM;
goto err2;
}
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
- static bool use_sg;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
* NULL.
*/
if (!urb->transfer_buffer)
- use_sg = true;
+ qh->use_sg = true;
- if (use_sg) {
+ if (qh->use_sg) {
/* sg_miter_start is already done in musb_ep_program */
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
qh->segsize = length;
- if (use_sg) {
+ if (qh->use_sg) {
if (offset + length >= urb->transfer_buffer_length)
- use_sg = false;
+ qh->use_sg = false;
}
musb_ep_select(mbase, epnum);
bool done = false;
u32 status;
struct dma_channel *dma;
- static bool use_sg;
unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
* NULL.
*/
if (!urb->transfer_buffer) {
- use_sg = true;
+ qh->use_sg = true;
sg_miter_start(&qh->sg_miter, urb->sg, 1,
sg_flags);
}
- if (use_sg) {
+ if (qh->use_sg) {
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
sg_miter_stop(&qh->sg_miter);
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
- if (use_sg)
- use_sg = false;
+ if (qh->use_sg)
+ qh->use_sg = false;
if (urb->status == -EINPROGRESS)
urb->status = status;
u16 frame; /* for periodic schedule */
unsigned iso_idx; /* in urb->iso_frame_desc[] */
struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */
+ bool use_sg; /* to track urb using sglist */
};
/* map from control or bulk queue head to the first qh on that ring */
glue->control_otghs = omap_get_control_dev();
if (IS_ERR(glue->control_otghs)) {
dev_vdbg(&pdev->dev, "Failed to get control device\n");
- return -ENODEV;
+ ret = PTR_ERR(glue->control_otghs);
+ goto err2;
}
} else {
glue->control_otghs = ERR_PTR(-ENODEV);
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
select USB_OTG
help
Enable this to support Freescale USB OTG transceiver.
tristate "NXP ISP1301 USB transceiver support"
depends on USB || USB_GADGET
depends on I2C
- select USB_OTG_UTILS
help
Say Y here to add support for the NXP ISP1301 USB transceiver driver.
This chip is typically used as USB transceiver for USB host, gadget
config USB_MV_OTG
tristate "Marvell USB OTG support"
- depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND
+ depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
select USB_OTG
help
 Say Y here if you want to build Marvell USB OTG transceiver
else if (ab->mode == USB_PERIPHERAL)
ab8500_usb_peri_phy_dis(ab);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
/* initialize the otg structure */
fsl_otg_tc->phy.label = DRIVER_DESC;
+ fsl_otg_tc->phy.dev = &pdev->dev;
fsl_otg_tc->phy.set_power = fsl_otg_set_power;
fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy;
platform_set_drvdata(pdev, gpio_vbus);
gpio_vbus->dev = &pdev->dev;
gpio_vbus->phy.label = "gpio-vbus";
+ gpio_vbus->phy.dev = gpio_vbus->dev;
gpio_vbus->phy.set_power = gpio_vbus_set_power;
gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
gpio_vbus->phy.state = OTG_STATE_UNDEFINED;
gpio_free(pdata->gpio_pullup);
gpio_free(pdata->gpio_vbus);
err_gpio:
- platform_set_drvdata(pdev, NULL);
kfree(gpio_vbus->phy.otg);
kfree(gpio_vbus);
return err;
if (gpio_is_valid(pdata->gpio_pullup))
gpio_free(pdata->gpio_pullup);
gpio_free(gpio);
- platform_set_drvdata(pdev, NULL);
kfree(gpio_vbus->phy.otg);
kfree(gpio_vbus);
mutex_init(&isp->mutex);
phy = &isp->phy;
+ phy->dev = &client->dev;
phy->label = DRV_NAME;
phy->init = isp1301_phy_init;
phy->set_vbus = isp1301_phy_set_vbus;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing mem resource\n");
- return -ENODEV;
- }
-
phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(phy_base))
return PTR_ERR(phy_base);
mv_otg_disable(mvotg);
usb_remove_phy(&mvotg->phy);
- platform_set_drvdata(pdev, NULL);
return 0;
}
flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
- platform_set_drvdata(pdev, NULL);
-
return retval;
}
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't get device resources\n");
- return -ENOENT;
- }
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
mxs_phy->phy.set_suspend = mxs_phy_suspend;
mxs_phy->phy.notify_connect = mxs_phy_on_connect;
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
+ mxs_phy->phy.type = USB_PHY_TYPE_USB2;
ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier);
usb_remove_phy(&mxs_phy->phy);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
usb_remove_phy(&nop->phy);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
int ret;
phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!phy_mem) {
- dev_err(dev, "%s: missing mem resource\n", __func__);
- return -ENODEV;
- }
-
phy_base = devm_ioremap_resource(dev, phy_mem);
if (IS_ERR(phy_base))
return PTR_ERR(phy_base);
int ret;
phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!phy_mem) {
- dev_err(dev, "%s: missing mem resource\n", __func__);
- return -ENODEV;
- }
-
phy_base = devm_ioremap_resource(dev, phy_mem);
if (IS_ERR(phy_base))
return PTR_ERR(phy_base);
#define DRIVER_NAME "ark3116"
/* usb timeout of 1 second */
-#define ARK_TIMEOUT (1*HZ)
+#define ARK_TIMEOUT 1000
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ } /* Terminating entry */
};
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
* Cypress serial helper functions
*****************************************************************************/
+/* FRWD Dongle hidcom needs to skip reset and speed checks */
+static inline bool is_frwd(struct usb_device *dev)
+{
+ return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
+ (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
+}
static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
{
if (unstable_bauds)
return new_rate;
+ /* FRWD Dongle uses 115200 bps */
+ if (is_frwd(port->serial->dev))
+ return new_rate;
+
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
return -ENOMEM;
}
- usb_reset_configuration(serial->dev);
+ /* Skip reset for the FRWD device. This is a workaround:
+ the device hangs if it receives SET_CONFIGURE in the
+ Configured state. */
+ if (!is_frwd(serial->dev))
+ usb_reset_configuration(serial->dev);
priv->cmd_ctrl = 0;
priv->line_control = 0;
#define VENDOR_ID_CYPRESS 0x04b4
#define PRODUCT_ID_CYPHIDCOM 0x5500
+/* FRWD Dongle - a GPS sports watch */
+#define VENDOR_ID_FRWD 0x6737
+#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
+
/* Powercom UPS, chip CY7C63723 */
#define VENDOR_ID_POWERCOM 0x0d9f
#define PRODUCT_ID_UPS 0x0002
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
{ USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static bool ftdi_tx_empty(struct usb_serial_port *port);
+static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2]);
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
- .chars_in_buffer = ftdi_chars_in_buffer,
+ .tx_empty = ftdi_tx_empty,
};
static struct usb_serial_driver * const serial_drivers[] = {
}
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
+static bool ftdi_tx_empty(struct usb_serial_port *port)
{
- struct usb_serial_port *port = tty->driver_data;
- int chars;
unsigned char buf[2];
int ret;
- chars = usb_serial_generic_chars_in_buffer(tty);
- if (chars)
- goto out;
-
- /* Check if hardware buffer is empty. */
- ret = ftdi_get_modem_status(tty, buf);
+ ret = ftdi_get_modem_status(port, buf);
if (ret == 2) {
if (!(buf[1] & FTDI_RS_TEMT))
- chars = 1;
+ return false;
}
-out:
- dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
- return chars;
+ return true;
}
/* old_termios contains the original termios settings and tty->termios contains
 * Returns the number of status bytes retrieved (device dependent), or
* negative error code.
*/
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2])
{
- struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned char *buf;
int len;
unsigned char buf[2];
int ret;
- ret = ftdi_get_modem_status(tty, buf);
+ ret = ftdi_get_modem_status(port, buf);
if (ret < 0)
return ret;
*/
#define NEWPORT_VID 0x104D
#define NEWPORT_AGILIS_PID 0x3000
+#define NEWPORT_CONEX_CC_PID 0x3002
+#define NEWPORT_CONEX_AGP_PID 0x3006
/* Interbiometrics USB I/O Board */
/* Developed for Interbiometrics by Rudolf Gugler */
}
EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ unsigned int bps;
+ unsigned long period;
+ unsigned long expire;
+
+ bps = tty_get_baud_rate(tty);
+ if (!bps)
+ bps = 9600; /* B0 */
+ /*
+ * Use a poll-period of roughly the time it takes to send one
+ * character or at least one jiffy.
+ */
+ period = max_t(unsigned long, (10 * HZ / bps), 1);
+ period = min_t(unsigned long, period, timeout);
+
+ dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+ __func__, jiffies_to_msecs(timeout),
+ jiffies_to_msecs(period));
+ expire = jiffies + timeout;
+ while (!port->serial->type->tx_empty(port)) {
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
+
static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
int index, gfp_t mem_flags)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
- int ret;
-
if (edge_port == NULL)
return 0;
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
- if (!chars) {
- ret = tx_active(edge_port);
- if (ret > 0)
- chars = ret;
- }
-
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
+static bool edge_tx_empty(struct usb_serial_port *port)
+{
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+ int ret;
+
+ ret = tx_active(edge_port);
+ if (ret > 0)
+ return false;
+
+ return true;
+}
+
static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
+ .tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
+ .tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress), buf,
- count, &actual, HZ * 1);
+ count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
usb_bulk_msg(serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress), buf,
- count, &actual, HZ * 1);
+ count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
- int outcont_urb;
struct urb *this_urb;
int device_port, err;
d_details = s_priv->device_details;
device_port = port->number - port->serial->minor;
- outcont_urb = d_details->outcont_endpoints[port->number];
this_urb = p_priv->outcont_urb;
dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
-#if 0
- else {
- dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__
- outcont_urb, this_urb->transfer_buffer_length,
- usb_pipeendpoint(this_urb->pipe));
- }
-#endif
-
return 0;
}
#define DRIVER_DESC "Moschip USB Serial Driver"
/* default urb timeout */
-#define MOS_WDR_TIMEOUT (HZ * 5)
+#define MOS_WDR_TIMEOUT 5000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
__u8 requesttype = (__u8)0xc0;
__u16 index = get_reg_index(reg);
__u16 value = get_reg_value(reg, serial_portnum);
- int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
- index, data, 1, MOS_WDR_TIMEOUT);
- if (status < 0)
+ u8 *buf;
+ int status;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, buf, 1, MOS_WDR_TIMEOUT);
+ if (status == 1)
+ *data = *buf;
+ else if (status < 0)
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d", status);
+ kfree(buf);
+
return status;
}
mos7720_port->shadowMCR |= (UART_MCR_XONANY);
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
- if (port->number)
+ if (port_number)
write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
else
write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
+ (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
/* start the interrupt urb */
ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
/* wait for synchronous usb calls to return */
if (mos_parport->msg_pending)
wait_for_completion_timeout(&mos_parport->syncmsg_compl,
- MOS_WDR_TIMEOUT);
+ msecs_to_jiffies(MOS_WDR_TIMEOUT));
parport_remove_port(mos_parport->pp);
usb_set_serial_data(serial, NULL);
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
+ u8 *buf;
__u16 data = 0, mcr_data = 0;
__u16 test_pattern = 0x55AA;
+ int res;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return 0; /* failed to identify 7810 */
/* Store MCR setting */
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
- &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ if (res == VENDOR_READ_LENGTH)
+ mcr_data = *buf;
for (i = 0; i < 16; i++) {
/* Send the 1-bit test pattern out to MCS7810 test pin */
MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
/* Read the test pattern back */
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
- VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+ VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ if (res == VENDOR_READ_LENGTH)
+ data = *buf;
/* If this is a MCS7810 device, both test patterns must match */
if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
0, MOS_WDR_TIMEOUT);
+ kfree(buf);
+
if (pass_count == 16)
return 1;
static int mos7840_calc_num_ports(struct usb_serial *serial)
{
__u16 data = 0x00;
+ u8 *buf;
int mos7840_num_ports;
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
- VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (buf) {
+ usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+ VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ data = *buf;
+ kfree(buf);
+ }
if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
+#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
-#define ZTE_PRODUCT_CDMA_TECH 0xfffe
-#define ZTE_PRODUCT_AC8710 0xfff1
-#define ZTE_PRODUCT_AC2726 0xfff5
-#define ZTE_PRODUCT_AC8710T 0xffff
#define ZTE_PRODUCT_MC2718 0xffe8
-#define ZTE_PRODUCT_AD3812 0xffeb
-#define ZTE_PRODUCT_MC2716 0xffed
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
-#define CINTERION_PRODUCT_AH6 0x0055
-#define CINTERION_PRODUCT_PLS8 0x0060
+#define CINTERION_PRODUCT_AHXX 0x0055
+#define CINTERION_PRODUCT_PLXX 0x0060
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
.reserved = BIT(4),
};
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
static const struct option_blacklist_info zte_mc2718_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
};
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
{USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
+ {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
/* non Gobi Qualcomm serial devices */
{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */
const unsigned char *data, int count);
static int ti_write_room(struct tty_struct *tty);
static int ti_chars_in_buffer(struct tty_struct *tty);
+static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
static int ti_ioctl(struct tty_struct *tty,
.write = ti_write,
.write_room = ti_write_room,
.chars_in_buffer = ti_chars_in_buffer,
+ .tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
.ioctl = ti_ioctl,
.write = ti_write,
.write_room = ti_write_room,
.chars_in_buffer = ti_chars_in_buffer,
+ .tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
.ioctl = ti_ioctl,
struct ti_port *tport = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
- int ret;
- u8 lsr;
if (tport == NULL)
return 0;
chars = kfifo_len(&tport->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
- if (!chars) {
- ret = ti_get_lsr(tport, &lsr);
- if (!ret && !(lsr & TI_LSR_TX_EMPTY))
- chars = 1;
- }
-
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
+static bool ti_tx_empty(struct usb_serial_port *port)
+{
+ struct ti_port *tport = usb_get_serial_port_data(port);
+ int ret;
+ u8 lsr;
+
+ ret = ti_get_lsr(tport, &lsr);
+ if (!ret && !(lsr & TI_LSR_TX_EMPTY))
+ return false;
+
+ return true;
+}
static void ti_throttle(struct tty_struct *tty)
{
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
- int count = 0;
dev_dbg(tty->dev, "%s\n", __func__);
- mutex_lock(&serial->disc_mutex);
- /* if the device was unplugged then any remaining characters
- fell out of the connector ;) */
if (serial->disconnected)
- count = 0;
- else
- count = serial->type->chars_in_buffer(tty);
- mutex_unlock(&serial->disc_mutex);
+ return 0;
- return count;
+ return serial->type->chars_in_buffer(tty);
+}
+
+static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+
+ dev_dbg(tty->dev, "%s\n", __func__);
+
+ if (!port->serial->type->wait_until_sent)
+ return;
+
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ port->serial->type->wait_until_sent(tty, timeout);
+ mutex_unlock(&serial->disc_mutex);
}
static void serial_throttle(struct tty_struct *tty)
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
- int retval = -ENODEV;
+ int retval = -ENOIOCTLCMD;
dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
default:
if (port->serial->type->ioctl)
retval = port->serial->type->ioctl(tty, cmd, arg);
- else
- retval = -ENOIOCTLCMD;
}
return retval;
.unthrottle = serial_unthrottle,
.break_ctl = serial_break,
.chars_in_buffer = serial_chars_in_buffer,
+ .wait_until_sent = serial_wait_until_sent,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
.get_icount = serial_get_icount,
set_to_generic_if_null(device, close);
set_to_generic_if_null(device, write_room);
set_to_generic_if_null(device, chars_in_buffer);
+ if (device->tx_empty)
+ set_to_generic_if_null(device, wait_until_sent);
set_to_generic_if_null(device, read_bulk_callback);
set_to_generic_if_null(device, write_bulk_callback);
set_to_generic_if_null(device, process_read_urb);
*/
#define COPY_PORT(dest, src) \
do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \
+ dest->read_urbs[i] = src->read_urbs[i]; \
+ dest->read_urbs[i]->context = dest; \
+ dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
+ } \
dest->read_urb = src->read_urb; \
dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
dest->bulk_in_buffer = src->bulk_in_buffer; \
+ dest->bulk_in_size = src->bulk_in_size; \
dest->interrupt_in_urb = src->interrupt_in_urb; \
+ dest->interrupt_in_urb->context = dest; \
dest->interrupt_in_endpointAddress = \
src->interrupt_in_endpointAddress;\
dest->interrupt_in_buffer = src->interrupt_in_buffer; \
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
- port_settings.port = port->number + 1;
+ port_settings.port = port->number - port->serial->minor + 1;
/* get the byte size */
switch (cflag & CSIZE) {
int len;
unsigned char *buf;
- if (port->number != 0)
- return -ENODEV;
-
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0001, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd cmd and receive data */
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 3 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 5 cmd */
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 6 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
kfree(buf);
int len;
unsigned char *buf;
- if (port->number != 0)
- return;
-
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0002, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd ctl cmd (CTL 21 22 03 00 00 00 00 00) */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 3rd cmd and receive data */
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 5 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 6 cmd */
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 7 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 8 cmd */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
kfree(buf);
}
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */
- { USB_DEVICE(0x19d2, 0xfffe) },
- { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */
+ /* AC8710, AC8710T */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
+ /* AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
+ /* MG880 */
+ { USB_DEVICE(0x19d2, 0xfffd) },
+ { USB_DEVICE(0x19d2, 0xfffc) },
+ { USB_DEVICE(0x19d2, 0xfffb) },
+ /* AC2726, AC8710_V3 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(0x19d2, 0xfff6) },
+ { USB_DEVICE(0x19d2, 0xfff7) },
+ { USB_DEVICE(0x19d2, 0xfff8) },
+ { USB_DEVICE(0x19d2, 0xfff9) },
+ { USB_DEVICE(0x19d2, 0xffee) },
+ /* AC2716, MC2716 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
+ /* AD3812 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(0x19d2, 0xffec) },
{ USB_DEVICE(0x05C6, 0x3197) },
{ USB_DEVICE(0x05C6, 0x6000) },
+ { USB_DEVICE(0x05C6, 0x9008) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
int status_len;
u32 flag;
-#ifdef CONFIG_REALTEK_AUTOPM
struct us_data *us;
+
+#ifdef CONFIG_REALTEK_AUTOPM
struct timer_list rts51x_suspend_timer;
unsigned long timer_expires;
int pwr_state;
us->extra = chip;
us->extra_destructor = realtek_cr_destructor;
us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
+ chip->us = us;
usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);
SET_AUTO_DELINK(chip);
}
#ifdef CONFIG_REALTEK_AUTOPM
- if (ss_en) {
- chip->us = us;
+ if (ss_en)
realtek_cr_autosuspend_setup(us);
- }
#endif
usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
- if (MINOR(dev->devt) == 0)
+ if (mode && (MINOR(dev->devt) == 0))
*mode = S_IRUGO | S_IWUGO;
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
*
* Since these may be in userspace, we use (inline) accessors.
*/
+#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
+
+MODULE_LICENSE("GPL");
config FB_GOLDFISH
tristate "Goldfish Framebuffer"
- depends on FB
+ depends on FB && HAS_DMA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
+config FB_SIMPLE
+ bool "Simple framebuffer support"
+ depends on (FB = y) && OF
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Say Y if you want support for a simple frame-buffer.
+
+ This driver assumes that the display hardware has been initialized
+ before the kernel boots, and the kernel will simply render to the
+ pre-allocated frame buffer surface.
+
+	  Configuration of the surface address, size, and format must be
+	  provided through the device tree (see the example node sketched
+	  below) or, potentially, plain old platform data in the future.
+
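For reference, a minimal device-tree node for such a pre-initialized display could look like the sketch below; the property names follow the simple-framebuffer binding, while the address, resolution, and pixel format values are purely illustrative.

 framebuffer {
 	compatible = "simple-framebuffer";
 	reg = <0x1d385000 (1600 * 1200 * 2)>;
 	width = <1600>;
 	height = <1200>;
 	stride = <(1600 * 2)>;
 	format = "r5g6b5";
 };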
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/exynos/Kconfig"
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_MXS) += mxsfb.o
obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
+obj-$(CONFIG_FB_SIMPLE) += simplefb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
static void exit_backlight(struct atmel_lcdfb_info *sinfo)
{
- if (sinfo->backlight)
- backlight_device_unregister(sinfo->backlight);
+ if (!sinfo->backlight)
+ return;
+
+ if (sinfo->backlight->ops) {
+ sinfo->backlight->props.power = FB_BLANK_POWERDOWN;
+ sinfo->backlight->ops->update_status(sinfo->backlight);
+ }
+ backlight_device_unregister(sinfo->backlight);
}
#else
if (info->fix.smem_len) {
unsigned int smem_len = (var->xres_virtual * var->yres_virtual
* ((var->bits_per_pixel + 7) / 8));
- if (smem_len > info->fix.smem_len)
+ if (smem_len > info->fix.smem_len) {
+ dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n",
+ info->fix.smem_len, smem_len);
return -EINVAL;
+ }
}
/* Saturate vertical and horizontal timings at maximum values */
font-objs += $(font-objs-y)
+obj-$(CONFIG_FONTS) += font.o
+
# Each configuration option enables a list of files.
obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o
*
* NOTE: Needed to maintain backwards compatibility
*/
-int fb_get_options(char *name, char **option)
+int fb_get_options(const char *name, char **option)
{
char *opt, *options = NULL;
int retval = 0;
static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
{
- u32 mask = overlay_is_vid(overlay) ? CFG_GRA_ENA_MASK :
- CFG_DMA_ENA_MASK;
- u32 enable = overlay_is_vid(overlay) ? CFG_GRA_ENA(1) : CFG_DMA_ENA(1);
+ u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK :
+ CFG_GRA_ENA_MASK;
+ u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1);
u32 tmp;
struct mmp_path *path = overlay->path;
struct lcd_regs *regs = path_regs(overlay->path);
/* FIXME: assert addr supported */
- memcpy(&overlay->addr, addr, sizeof(struct mmp_win));
+ memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
writel(addr->phys[0], &regs->g_0);
return overlay->addr.phys[0];
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
source "drivers/video/omap2/displays/Kconfig"
+source "drivers/video/omap2/displays-new/Kconfig"
endif
obj-$(CONFIG_OMAP2_DSS) += dss/
obj-y += displays/
+obj-y += displays-new/
obj-$(CONFIG_FB_OMAP2) += omapfb/
--- /dev/null
+menu "OMAP Display Device Drivers (new device model)"
+ depends on OMAP2_DSS
+
+config DISPLAY_ENCODER_TFP410
+ tristate "TFP410 DPI to DVI Encoder"
+ help
+ Driver for TFP410 DPI to DVI encoder.
+
+config DISPLAY_ENCODER_TPD12S015
+ tristate "TPD12S015 HDMI ESD protection and level shifter"
+ help
+ Driver for TPD12S015, which offers HDMI ESD protection and level
+ shifting.
+
+config DISPLAY_CONNECTOR_DVI
+ tristate "DVI Connector"
+ depends on I2C
+ help
+ Driver for a generic DVI connector.
+
+config DISPLAY_CONNECTOR_HDMI
+ tristate "HDMI Connector"
+ help
+ Driver for a generic HDMI connector.
+
+config DISPLAY_CONNECTOR_ANALOG_TV
+ tristate "Analog TV Connector"
+ help
+ Driver for a generic analog TV connector.
+
+config DISPLAY_PANEL_DPI
+ tristate "Generic DPI panel"
+ help
+ Driver for generic DPI panels.
+
+config DISPLAY_PANEL_DSI_CM
+ tristate "Generic DSI Command Mode Panel"
+ help
+ Driver for generic DSI command mode panels.
+
+config DISPLAY_PANEL_SONY_ACX565AKM
+ tristate "ACX565AKM Panel"
+ depends on SPI && BACKLIGHT_CLASS_DEVICE
+ help
+	  This is the LCD panel used on the Nokia N900.
+
+config DISPLAY_PANEL_LGPHILIPS_LB035Q02
+ tristate "LG.Philips LB035Q02 LCD Panel"
+ depends on SPI
+ help
+	  LCD panel used on the Gumstix Overo Palo35.
+
+config DISPLAY_PANEL_SHARP_LS037V7DW01
+ tristate "Sharp LS037V7DW01 LCD Panel"
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  LCD panel used in TI's SDP3430 and EVM boards.
+
+config DISPLAY_PANEL_TPO_TD043MTEA1
+ tristate "TPO TD043MTEA1 LCD Panel"
+ depends on SPI
+ help
+	  LCD panel used in the OMAP3 Pandora.
+
+config DISPLAY_PANEL_NEC_NL8048HL11
+ tristate "NEC NL8048HL11 Panel"
+ depends on SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  This NEC NL8048HL11 panel is a TFT LCD used in the
+	  Zoom2/3/3630 SDP boards.
+
+endmenu
--- /dev/null
+obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
+obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
+obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
+obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o
+obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
+obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o
+obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
+obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
+obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
--- /dev/null
+/*
+ * Analog TV Connector driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct device *dev;
+
+ struct omap_video_timings timings;
+
+ enum omap_dss_venc_type connector_type;
+ bool invert_polarity;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static int tvc_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(ddata->dev, "connect\n");
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.atv->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void tvc_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(ddata->dev, "disconnect\n");
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.atv->disconnect(in, dssdev);
+}
+
+static int tvc_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(ddata->dev, "enable\n");
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.atv->set_timings(in, &ddata->timings);
+
+ in->ops.atv->set_type(in, ddata->connector_type);
+ in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity);
+
+ r = in->ops.atv->enable(in);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return r;
+}
+
+static void tvc_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(ddata->dev, "disable\n");
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ in->ops.atv->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void tvc_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.atv->set_timings(in, timings);
+}
+
+static void tvc_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->timings;
+}
+
+static int tvc_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.atv->check_timings(in, timings);
+}
+
+static u32 tvc_get_wss(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.atv->get_wss(in);
+}
+
+static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.atv->set_wss(in, wss);
+}
+
+static struct omap_dss_driver tvc_driver = {
+ .connect = tvc_connect,
+ .disconnect = tvc_disconnect,
+
+ .enable = tvc_enable,
+ .disable = tvc_disable,
+
+ .set_timings = tvc_set_timings,
+ .get_timings = tvc_get_timings,
+ .check_timings = tvc_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+
+ .get_wss = tvc_get_wss,
+ .set_wss = tvc_set_wss,
+};
+
+static int tvc_probe_pdata(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct connector_atv_platform_data *pdata;
+ struct omap_dss_device *in, *dssdev;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "Failed to find video source\n");
+ return -ENODEV;
+ }
+
+ ddata->in = in;
+
+ ddata->connector_type = pdata->connector_type;
+	ddata->invert_polarity = pdata->invert_polarity;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int tvc_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+ ddata->dev = &pdev->dev;
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = tvc_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->timings = omap_dss_pal_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->driver = &tvc_driver;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = omap_dss_pal_timings;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit tvc_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(&ddata->dssdev);
+
+ tvc_disable(dssdev);
+ tvc_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver tvc_connector_driver = {
+ .probe = tvc_probe,
+ .remove = __exit_p(tvc_remove),
+ .driver = {
+ .name = "connector-analog-tv",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(tvc_connector_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Analog TV Connector driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Generic DVI Connector driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <drm/drm_edid.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+static const struct omap_video_timings dvic_default_timings = {
+ .x_res = 640,
+ .y_res = 480,
+
+ .pixel_clock = 23500,
+
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct omap_video_timings timings;
+
+ struct i2c_adapter *i2c_adapter;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static int dvic_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dvi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void dvic_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dvi->disconnect(in, dssdev);
+}
+
+static int dvic_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dvi->set_timings(in, &ddata->timings);
+
+ r = in->ops.dvi->enable(in);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void dvic_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ in->ops.dvi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void dvic_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dvi->set_timings(in, timings);
+}
+
+static void dvic_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->timings;
+}
+
+static int dvic_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dvi->check_timings(in, timings);
+}
+
+static int dvic_ddc_read(struct i2c_adapter *adapter,
+ unsigned char *buf, u16 count, u8 offset)
+{
+ int r, retries;
+
+ for (retries = 3; retries > 0; retries--) {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = count,
+ .buf = buf,
+ }
+ };
+
+ r = i2c_transfer(adapter, msgs, 2);
+ if (r == 2)
+ return 0;
+
+ if (r != -EAGAIN)
+ break;
+ }
+
+ return r < 0 ? r : -EIO;
+}
+
+static int dvic_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ int r, l, bytes_read;
+
+ if (!ddata->i2c_adapter)
+ return -ENODEV;
+
+ l = min(EDID_LENGTH, len);
+ r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
+ if (r)
+ return r;
+
+ bytes_read = l;
+
+ /* if there are extensions, read second block */
+ if (len > EDID_LENGTH && edid[0x7e] > 0) {
+ l = min(EDID_LENGTH, len - EDID_LENGTH);
+
+ r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
+ l, EDID_LENGTH);
+ if (r)
+ return r;
+
+ bytes_read += l;
+ }
+
+ return bytes_read;
+}
+
+static bool dvic_detect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ unsigned char out;
+ int r;
+
+ if (!ddata->i2c_adapter)
+ return true;
+
+ r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0);
+
+ return r == 0;
+}
+
+static struct omap_dss_driver dvic_driver = {
+ .connect = dvic_connect,
+ .disconnect = dvic_disconnect,
+
+ .enable = dvic_enable,
+ .disable = dvic_disable,
+
+ .set_timings = dvic_set_timings,
+ .get_timings = dvic_get_timings,
+ .check_timings = dvic_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+
+ .read_edid = dvic_read_edid,
+ .detect = dvic_detect,
+};
+
+static int dvic_probe_pdata(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct connector_dvi_platform_data *pdata;
+ struct omap_dss_device *in, *dssdev;
+ int i2c_bus_num;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ i2c_bus_num = pdata->i2c_bus_num;
+
+ if (i2c_bus_num != -1) {
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(i2c_bus_num);
+ if (!adapter) {
+ dev_err(&pdev->dev,
+ "Failed to get I2C adapter, bus %d\n",
+ i2c_bus_num);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->i2c_adapter = adapter;
+ }
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "Failed to find video source\n");
+ return -ENODEV;
+ }
+
+ ddata->in = in;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int dvic_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = dvic_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->timings = dvic_default_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->driver = &dvic_driver;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_DVI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = dvic_default_timings;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit dvic_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(&ddata->dssdev);
+
+ dvic_disable(dssdev);
+ dvic_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
+ return 0;
+}
+
+static struct platform_driver dvi_connector_driver = {
+ .probe = dvic_probe,
+ .remove = __exit_p(dvic_remove),
+ .driver = {
+ .name = "connector-dvi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(dvi_connector_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic DVI Connector driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * HDMI Connector driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_edid.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+static const struct omap_video_timings hdmic_default_timings = {
+ .x_res = 640,
+ .y_res = 480,
+ .pixel_clock = 25175,
+ .hsw = 96,
+ .hfp = 16,
+ .hbp = 48,
+ .vsw = 2,
+ .vfp = 11,
+ .vbp = 31,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+
+ .interlace = false,
+};
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct device *dev;
+
+ struct omap_video_timings timings;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static int hdmic_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(ddata->dev, "connect\n");
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.hdmi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void hdmic_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(ddata->dev, "disconnect\n");
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.hdmi->disconnect(in, dssdev);
+}
+
+static int hdmic_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(ddata->dev, "enable\n");
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.hdmi->set_timings(in, &ddata->timings);
+
+ r = in->ops.hdmi->enable(in);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return r;
+}
+
+static void hdmic_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(ddata->dev, "disable\n");
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ in->ops.hdmi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void hdmic_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.hdmi->set_timings(in, timings);
+}
+
+static void hdmic_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->timings;
+}
+
+static int hdmic_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->check_timings(in, timings);
+}
+
+static int hdmic_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->read_edid(in, edid, len);
+}
+
+static bool hdmic_detect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->detect(in);
+}
+
+static int hdmic_audio_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ /* enable audio only if the display is active */
+ if (!omapdss_device_is_enabled(dssdev))
+ return -EPERM;
+
+ r = in->ops.hdmi->audio_enable(in);
+ if (r)
+ return r;
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+ return 0;
+}
+
+static void hdmic_audio_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ in->ops.hdmi->audio_disable(in);
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+}
+
+static int hdmic_audio_start(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ /*
+	 * No need to check the panel state. It was checked when transitioning
+ * to AUDIO_ENABLED.
+ */
+ if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED)
+ return -EPERM;
+
+ r = in->ops.hdmi->audio_start(in);
+ if (r)
+ return r;
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+ return 0;
+}
+
+static void hdmic_audio_stop(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ in->ops.hdmi->audio_stop(in);
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+}
+
+static bool hdmic_audio_supported(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return false;
+
+ return in->ops.hdmi->audio_supported(in);
+}
+
+static int hdmic_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ /* config audio only if the display is active */
+ if (!omapdss_device_is_enabled(dssdev))
+ return -EPERM;
+
+ r = in->ops.hdmi->audio_config(in, audio);
+ if (r)
+ return r;
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+ return 0;
+}
+
+static struct omap_dss_driver hdmic_driver = {
+ .connect = hdmic_connect,
+ .disconnect = hdmic_disconnect,
+
+ .enable = hdmic_enable,
+ .disable = hdmic_disable,
+
+ .set_timings = hdmic_set_timings,
+ .get_timings = hdmic_get_timings,
+ .check_timings = hdmic_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+
+ .read_edid = hdmic_read_edid,
+ .detect = hdmic_detect,
+
+ .audio_enable = hdmic_audio_enable,
+ .audio_disable = hdmic_audio_disable,
+ .audio_start = hdmic_audio_start,
+ .audio_stop = hdmic_audio_stop,
+ .audio_supported = hdmic_audio_supported,
+ .audio_config = hdmic_audio_config,
+};
+
+static int hdmic_probe_pdata(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct connector_hdmi_platform_data *pdata;
+ struct omap_dss_device *in, *dssdev;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "Failed to find video source\n");
+ return -ENODEV;
+ }
+
+ ddata->in = in;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int hdmic_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+ ddata->dev = &pdev->dev;
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = hdmic_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->timings = hdmic_default_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->driver = &hdmic_driver;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = hdmic_default_timings;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit hdmic_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(&ddata->dssdev);
+
+ hdmic_disable(dssdev);
+ hdmic_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver hdmi_connector_driver = {
+ .probe = hdmic_probe,
+ .remove = __exit_p(hdmic_remove),
+ .driver = {
+ .name = "connector-hdmi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(hdmi_connector_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("HDMI Connector driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * TFP410 DPI-to-DVI encoder driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int pd_gpio;
+ int data_lines;
+
+ struct omap_video_timings timings;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static int tfp410_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return -EBUSY;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ dst->output = dssdev;
+ dssdev->device = dst;
+
+ return 0;
+}
+
+static void tfp410_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ WARN_ON(!omapdss_device_is_connected(dssdev));
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ WARN_ON(dst != dssdev->device);
+ if (dst != dssdev->device)
+ return;
+
+ dst->output = NULL;
+ dssdev->device = NULL;
+
+ in->ops.dpi->disconnect(in, &ddata->dssdev);
+}
+
+static int tfp410_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ if (gpio_is_valid(ddata->pd_gpio))
+ gpio_set_value_cansleep(ddata->pd_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void tfp410_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (gpio_is_valid(ddata->pd_gpio))
+ gpio_set_value_cansleep(ddata->pd_gpio, 0);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void tfp410_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void tfp410_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->timings;
+}
+
+static int tfp410_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static const struct omapdss_dvi_ops tfp410_dvi_ops = {
+ .connect = tfp410_connect,
+ .disconnect = tfp410_disconnect,
+
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
+
+ .check_timings = tfp410_check_timings,
+ .set_timings = tfp410_set_timings,
+ .get_timings = tfp410_get_timings,
+};
+
+static int tfp410_probe_pdata(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct encoder_tfp410_platform_data *pdata;
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ ddata->pd_gpio = pdata->power_down_gpio;
+
+ ddata->data_lines = pdata->data_lines;
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "Failed to find video source\n");
+ return -ENODEV;
+ }
+
+ ddata->in = in;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = tfp410_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->pd_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
+ GPIOF_OUT_INIT_LOW, "tfp410 PD");
+ if (r) {
+ dev_err(&pdev->dev, "Failed to request PD GPIO %d\n",
+ ddata->pd_gpio);
+ goto err_gpio;
+ }
+ }
+
+ dssdev = &ddata->dssdev;
+ dssdev->ops.dvi = &tfp410_dvi_ops;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_output(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register output\n");
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit tfp410_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_output(&ddata->dssdev);
+
+ WARN_ON(omapdss_device_is_enabled(dssdev));
+ if (omapdss_device_is_enabled(dssdev))
+ tfp410_disable(dssdev);
+
+ WARN_ON(omapdss_device_is_connected(dssdev));
+ if (omapdss_device_is_connected(dssdev))
+ tfp410_disconnect(dssdev, dssdev->device);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver tfp410_driver = {
+ .probe = tfp410_probe,
+ .remove = __exit_p(tfp410_remove),
+ .driver = {
+ .name = "tfp410",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(tfp410_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int ct_cp_hpd_gpio;
+ int ls_oe_gpio;
+ int hpd_gpio;
+
+ struct omap_video_timings timings;
+
+ struct completion hpd_completion;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static irqreturn_t tpd_hpd_irq_handler(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+ bool hpd;
+
+ hpd = gpio_get_value_cansleep(ddata->hpd_gpio);
+
+ dev_dbg(ddata->dssdev.dev, "hpd %d\n", hpd);
+
+ if (gpio_is_valid(ddata->ls_oe_gpio)) {
+ if (hpd)
+ gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
+ else
+ gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
+ }
+
+ complete_all(&ddata->hpd_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int tpd_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ r = in->ops.hdmi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ dst->output = dssdev;
+ dssdev->device = dst;
+
+ INIT_COMPLETION(ddata->hpd_completion);
+
+ gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
+	/* The DC-DC converter needs at most 300us to get to 90% of 5V */
+ udelay(300);
+
+ /*
+ * If there's a cable connected, wait for the hpd irq to trigger,
+ * which turns on the level shifters.
+ */
+ if (gpio_get_value_cansleep(ddata->hpd_gpio)) {
+ unsigned long to;
+ to = wait_for_completion_timeout(&ddata->hpd_completion,
+ msecs_to_jiffies(250));
+ WARN_ON_ONCE(to == 0);
+ }
+
+ return 0;
+}
+
+static void tpd_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
+
+ dst->output = NULL;
+ dssdev->device = NULL;
+
+ in->ops.hdmi->disconnect(in, &ddata->dssdev);
+}
+
+static int tpd_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ in->ops.hdmi->set_timings(in, &ddata->timings);
+
+ r = in->ops.hdmi->enable(in);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return r;
+}
+
+static void tpd_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ in->ops.hdmi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void tpd_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.hdmi->set_timings(in, timings);
+}
+
+static void tpd_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->timings;
+}
+
+static int tpd_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ r = in->ops.hdmi->check_timings(in, timings);
+
+ return r;
+}
+
+static int tpd_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!gpio_get_value_cansleep(ddata->hpd_gpio))
+ return -ENODEV;
+
+ return in->ops.hdmi->read_edid(in, edid, len);
+}
+
+static bool tpd_detect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ return gpio_get_value_cansleep(ddata->hpd_gpio);
+}
+
+static int tpd_audio_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->audio_enable(in);
+}
+
+static void tpd_audio_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ in->ops.hdmi->audio_disable(in);
+}
+
+static int tpd_audio_start(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->audio_start(in);
+}
+
+static void tpd_audio_stop(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ in->ops.hdmi->audio_stop(in);
+}
+
+static bool tpd_audio_supported(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->audio_supported(in);
+}
+
+static int tpd_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->audio_config(in, audio);
+}
+
+static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
+ .connect = tpd_connect,
+ .disconnect = tpd_disconnect,
+
+ .enable = tpd_enable,
+ .disable = tpd_disable,
+
+ .check_timings = tpd_check_timings,
+ .set_timings = tpd_set_timings,
+ .get_timings = tpd_get_timings,
+
+ .read_edid = tpd_read_edid,
+ .detect = tpd_detect,
+
+ .audio_enable = tpd_audio_enable,
+ .audio_disable = tpd_audio_disable,
+ .audio_start = tpd_audio_start,
+ .audio_stop = tpd_audio_stop,
+ .audio_supported = tpd_audio_supported,
+ .audio_config = tpd_audio_config,
+};
+
+static int tpd_probe_pdata(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct encoder_tpd12s015_platform_data *pdata;
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ ddata->ct_cp_hpd_gpio = pdata->ct_cp_hpd_gpio;
+ ddata->ls_oe_gpio = pdata->ls_oe_gpio;
+ ddata->hpd_gpio = pdata->hpd_gpio;
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "Failed to find video source\n");
+ return -ENODEV;
+ }
+
+ ddata->in = in;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int tpd_probe(struct platform_device *pdev)
+{
+ struct omap_dss_device *in, *dssdev;
+ struct panel_drv_data *ddata;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ init_completion(&ddata->hpd_completion);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = tpd_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ r = devm_gpio_request_one(&pdev->dev, ddata->ct_cp_hpd_gpio,
+ GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd");
+ if (r)
+ goto err_gpio;
+
+ if (gpio_is_valid(ddata->ls_oe_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->ls_oe_gpio,
+ GPIOF_OUT_INIT_LOW, "hdmi_ls_oe");
+ if (r)
+ goto err_gpio;
+ }
+
+ r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
+ GPIOF_DIR_IN, "hdmi_hpd");
+ if (r)
+ goto err_gpio;
+
+ r = devm_request_threaded_irq(&pdev->dev, gpio_to_irq(ddata->hpd_gpio),
+ NULL, tpd_hpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "hpd", ddata);
+ if (r)
+ goto err_irq;
+
+ dssdev = &ddata->dssdev;
+ dssdev->ops.hdmi = &tpd_hdmi_ops;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->owner = THIS_MODULE;
+
+ in = ddata->in;
+
+ r = omapdss_register_output(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register output\n");
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+err_irq:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit tpd_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_output(&ddata->dssdev);
+
+ WARN_ON(omapdss_device_is_enabled(dssdev));
+ if (omapdss_device_is_enabled(dssdev))
+ tpd_disable(dssdev);
+
+ WARN_ON(omapdss_device_is_connected(dssdev));
+ if (omapdss_device_is_connected(dssdev))
+ tpd_disconnect(dssdev, dssdev->device);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver tpd_driver = {
+ .probe = tpd_probe,
+ .remove = __exit_p(tpd_remove),
+ .driver = {
+ .name = "tpd12s015",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(tpd_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TPD12S015 driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Generic MIPI DPI Panel Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int data_lines;
+
+ struct omap_video_timings videomode;
+
+ int backlight_gpio;
+ int enable_gpio;
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int panel_dpi_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int panel_dpi_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ if (gpio_is_valid(ddata->enable_gpio))
+ gpio_set_value_cansleep(ddata->enable_gpio, 1);
+
+ if (gpio_is_valid(ddata->backlight_gpio))
+ gpio_set_value_cansleep(ddata->backlight_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void panel_dpi_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (gpio_is_valid(ddata->enable_gpio))
+ gpio_set_value_cansleep(ddata->enable_gpio, 0);
+
+ if (gpio_is_valid(ddata->backlight_gpio))
+ gpio_set_value_cansleep(ddata->backlight_gpio, 0);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver panel_dpi_ops = {
+ .connect = panel_dpi_connect,
+ .disconnect = panel_dpi_disconnect,
+
+ .enable = panel_dpi_enable,
+ .disable = panel_dpi_disable,
+
+ .set_timings = panel_dpi_set_timings,
+ .get_timings = panel_dpi_get_timings,
+ .check_timings = panel_dpi_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+static int panel_dpi_probe_pdata(struct platform_device *pdev)
+{
+ const struct panel_dpi_platform_data *pdata;
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev, *in;
+ struct videomode vm;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ videomode_from_timing(pdata->display_timing, &vm);
+ videomode_to_omap_video_timings(&vm, &ddata->videomode);
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ ddata->enable_gpio = pdata->enable_gpio;
+ ddata->backlight_gpio = pdata->backlight_gpio;
+
+ return 0;
+}
+
+static int panel_dpi_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = panel_dpi_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->enable_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->enable_gpio,
+ GPIOF_OUT_INIT_LOW, "panel enable");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->backlight_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->backlight_gpio,
+ GPIOF_OUT_INIT_LOW, "panel backlight");
+ if (r)
+ goto err_gpio;
+ }
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &pdev->dev;
+ dssdev->driver = &panel_dpi_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit panel_dpi_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(dssdev);
+
+ panel_dpi_disable(dssdev);
+ panel_dpi_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver panel_dpi_driver = {
+ .probe = panel_dpi_probe,
+ .remove = __exit_p(panel_dpi_remove),
+ .driver = {
+ .name = "panel-dpi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(panel_dpi_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Generic DSI Command Mode panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/* #define DEBUG */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+#include <video/mipi_display.h>
+
+/* DSI Virtual channel. Hardcoded for now. */
+#define TCH 0
+
+#define DCS_READ_NUM_ERRORS 0x05
+#define DCS_BRIGHTNESS 0x51
+#define DCS_CTRL_DISPLAY 0x53
+#define DCS_GET_ID1 0xda
+#define DCS_GET_ID2 0xdb
+#define DCS_GET_ID3 0xdc
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct omap_video_timings timings;
+
+ struct platform_device *pdev;
+
+ struct mutex lock;
+
+ struct backlight_device *bldev;
+
+ unsigned long hw_guard_end; /* next value of jiffies when we can
+ * issue the next sleep in/out command
+ */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ /* panel HW configuration from DT or platform data */
+ int reset_gpio;
+ int ext_te_gpio;
+
+ bool use_dsi_backlight;
+
+ struct omap_dsi_pin_config pin_config;
+
+ /* runtime variables */
+ bool enabled;
+
+ bool te_enabled;
+
+ atomic_t do_update;
+ int channel;
+
+ struct delayed_work te_timeout_work;
+
+ bool intro_printed;
+
+ struct workqueue_struct *workqueue;
+
+ bool ulps_enabled;
+ unsigned ulps_timeout;
+ struct delayed_work ulps_work;
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static irqreturn_t dsicm_te_isr(int irq, void *data);
+static void dsicm_te_timeout_work_callback(struct work_struct *work);
+static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);
+
+static int dsicm_panel_reset(struct panel_drv_data *ddata);
+
+static void dsicm_ulps_work(struct work_struct *work);
+
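+/*
+ * HW guard: hw_guard_start() arms a time window after a sleep in/out DCS
+ * command and hw_guard_wait() blocks until that window has expired, so the
+ * mandatory delay between consecutive sleep in/out commands is respected.
+ */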
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+ ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+ unsigned long wait = ddata->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+ u8 buf[1];
+
+ r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1);
+
+ if (r < 0)
+ return r;
+
+ *data = buf[0];
+
+ return 0;
+}
+
+static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
+{
+ struct omap_dss_device *in = ddata->in;
+ return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1);
+}
+
+static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
+{
+ struct omap_dss_device *in = ddata->in;
+ u8 buf[2] = { dcs_cmd, param };
+
+ return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2);
+}
+
+static int dsicm_sleep_in(struct panel_drv_data *ddata)
+{
+ struct omap_dss_device *in = ddata->in;
+ u8 cmd;
+ int r;
+
+ hw_guard_wait(ddata);
+
+ cmd = MIPI_DCS_ENTER_SLEEP_MODE;
+ r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_sleep_out(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
+{
+ int r;
+
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
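+/* Program the DCS column/page address window used for the next frame update */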
+static int dsicm_set_update_window(struct panel_drv_data *ddata,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+ u16 x1 = x;
+ u16 x2 = x + w - 1;
+ u16 y1 = y;
+ u16 y2 = y + h - 1;
+
+ u8 buf[5];
+ buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
+ buf[1] = (x1 >> 8) & 0xff;
+ buf[2] = (x1 >> 0) & 0xff;
+ buf[3] = (x2 >> 8) & 0xff;
+ buf[4] = (x2 >> 0) & 0xff;
+
+ r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ if (r)
+ return r;
+
+ buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
+ buf[1] = (y1 >> 8) & 0xff;
+ buf[2] = (y1 >> 0) & 0xff;
+ buf[3] = (y2 >> 8) & 0xff;
+ buf[4] = (y2 >> 0) & 0xff;
+
+ r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ if (r)
+ return r;
+
+ in->ops.dsi->bta_sync(in, ddata->channel);
+
+ return r;
+}
+
+static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
+{
+ if (ddata->ulps_timeout > 0)
+ queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
+ msecs_to_jiffies(ddata->ulps_timeout));
+}
+
+static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
+{
+ cancel_delayed_work(&ddata->ulps_work);
+}
+
+static int dsicm_enter_ulps(struct panel_drv_data *ddata)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (ddata->ulps_enabled)
+ return 0;
+
+ dsicm_cancel_ulps_work(ddata);
+
+ r = _dsicm_enable_te(ddata, false);
+ if (r)
+ goto err;
+
+ if (gpio_is_valid(ddata->ext_te_gpio))
+ disable_irq(gpio_to_irq(ddata->ext_te_gpio));
+
+ in->ops.dsi->disable(in, false, true);
+
+ ddata->ulps_enabled = true;
+
+ return 0;
+
+err:
+ dev_err(&ddata->pdev->dev, "enter ULPS failed");
+ dsicm_panel_reset(ddata);
+
+ ddata->ulps_enabled = false;
+
+ dsicm_queue_ulps_work(ddata);
+
+ return r;
+}
+
+static int dsicm_exit_ulps(struct panel_drv_data *ddata)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!ddata->ulps_enabled)
+ return 0;
+
+ r = in->ops.dsi->enable(in);
+ if (r) {
+ dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
+ goto err1;
+ }
+
+ in->ops.dsi->enable_hs(in, ddata->channel, true);
+
+ r = _dsicm_enable_te(ddata, true);
+ if (r) {
+ dev_err(&ddata->pdev->dev, "failed to re-enable TE");
+ goto err2;
+ }
+
+ if (gpio_is_valid(ddata->ext_te_gpio))
+ enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+
+ dsicm_queue_ulps_work(ddata);
+
+ ddata->ulps_enabled = false;
+
+ return 0;
+
+err2:
+ dev_err(&ddata->pdev->dev, "failed to exit ULPS");
+
+ r = dsicm_panel_reset(ddata);
+ if (!r) {
+ if (gpio_is_valid(ddata->ext_te_gpio))
+ enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ ddata->ulps_enabled = false;
+ }
+err1:
+ dsicm_queue_ulps_work(ddata);
+
+ return r;
+}
+
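+/* Exit ULPS if it is active, otherwise just re-arm the ULPS inactivity timer */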
+static int dsicm_wake_up(struct panel_drv_data *ddata)
+{
+ if (ddata->ulps_enabled)
+ return dsicm_exit_ulps(ddata);
+
+ dsicm_cancel_ulps_work(ddata);
+ dsicm_queue_ulps_work(ddata);
+ return 0;
+}
+
+static int dsicm_bl_update_status(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled) {
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
+
+ in->ops.dsi->bus_unlock(in);
+ } else {
+ r = 0;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops dsicm_bl_ops = {
+ .get_brightness = dsicm_bl_get_intensity,
+ .update_status = dsicm_bl_update_status,
+};
+
+static void dsicm_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+}
+
+static ssize_t dsicm_num_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *in = ddata->in;
+ u8 errors = 0;
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled) {
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
+ &errors);
+
+ in->ops.dsi->bus_unlock(in);
+ } else {
+ r = -ENODEV;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t dsicm_hw_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *in = ddata->in;
+ u8 id1, id2, id3;
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled) {
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+
+ in->ops.dsi->bus_unlock(in);
+ } else {
+ r = -ENODEV;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static ssize_t dsicm_store_ulps(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *in = ddata->in;
+ unsigned long t;
+ int r;
+
+ r = kstrtoul(buf, 0, &t);
+ if (r)
+ return r;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled) {
+ in->ops.dsi->bus_lock(in);
+
+ if (t)
+ r = dsicm_enter_ulps(ddata);
+ else
+ r = dsicm_wake_up(ddata);
+
+ in->ops.dsi->bus_unlock(in);
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return count;
+}
+
+static ssize_t dsicm_show_ulps(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ unsigned t;
+
+ mutex_lock(&ddata->lock);
+ t = ddata->ulps_enabled;
+ mutex_unlock(&ddata->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", t);
+}
+
+static ssize_t dsicm_store_ulps_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *in = ddata->in;
+ unsigned long t;
+ int r;
+
+ r = kstrtoul(buf, 0, &t);
+ if (r)
+ return r;
+
+ mutex_lock(&ddata->lock);
+ ddata->ulps_timeout = t;
+
+ if (ddata->enabled) {
+ /* dsicm_wake_up will restart the timer */
+ in->ops.dsi->bus_lock(in);
+ r = dsicm_wake_up(ddata);
+ in->ops.dsi->bus_unlock(in);
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return count;
+}
+
+static ssize_t dsicm_show_ulps_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ unsigned t;
+
+ mutex_lock(&ddata->lock);
+ t = ddata->ulps_timeout;
+ mutex_unlock(&ddata->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", t);
+}
+
+static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
+static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
+static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
+ dsicm_show_ulps, dsicm_store_ulps);
+static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
+ dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);
+
+static struct attribute *dsicm_attrs[] = {
+ &dev_attr_num_dsi_errors.attr,
+ &dev_attr_hw_revision.attr,
+ &dev_attr_ulps.attr,
+ &dev_attr_ulps_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group dsicm_attr_group = {
+ .attrs = dsicm_attrs,
+};
+
+static void dsicm_hw_reset(struct panel_drv_data *ddata)
+{
+ if (!gpio_is_valid(ddata->reset_gpio))
+ return;
+
+ gpio_set_value(ddata->reset_gpio, 1);
+ udelay(10);
+ /* assert reset */
+ gpio_set_value(ddata->reset_gpio, 0);
+ udelay(10);
+ /* release reset */
+ gpio_set_value(ddata->reset_gpio, 1);
+ /* wait after releasing reset */
+ usleep_range(5000, 10000);
+}
+
+static int dsicm_power_on(struct panel_drv_data *ddata)
+{
+ struct omap_dss_device *in = ddata->in;
+ u8 id1, id2, id3;
+ int r;
+ struct omap_dss_dsi_config dsi_config = {
+ .mode = OMAP_DSS_DSI_CMD_MODE,
+ .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
+ .timings = &ddata->timings,
+ .hs_clk_min = 150000000,
+ .hs_clk_max = 300000000,
+ .lp_clk_min = 7000000,
+ .lp_clk_max = 10000000,
+ };
+
+ r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
+ if (r) {
+ dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n");
+ goto err0;
+ }
+
+ r = in->ops.dsi->set_config(in, &dsi_config);
+ if (r) {
+ dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
+ goto err0;
+ }
+
+ r = in->ops.dsi->enable(in);
+ if (r) {
+ dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
+ goto err0;
+ }
+
+ dsicm_hw_reset(ddata);
+
+ in->ops.dsi->enable_hs(in, ddata->channel, false);
+
+ r = dsicm_sleep_out(ddata);
+ if (r)
+ goto err;
+
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
+ (1<<2) | (1<<5)); /* BL | BCTRL */
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_24BIT);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
+ if (r)
+ goto err;
+
+ r = _dsicm_enable_te(ddata, ddata->te_enabled);
+ if (r)
+ goto err;
+
+ r = in->ops.dsi->enable_video_output(in, ddata->channel);
+ if (r)
+ goto err;
+
+ ddata->enabled = true;
+
+ if (!ddata->intro_printed) {
+ dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
+ id1, id2, id3);
+ ddata->intro_printed = true;
+ }
+
+ in->ops.dsi->enable_hs(in, ddata->channel, true);
+
+ return 0;
+err:
+ dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");
+
+ dsicm_hw_reset(ddata);
+
+ in->ops.dsi->disable(in, true, false);
+err0:
+ return r;
+}
+
+static void dsicm_power_off(struct panel_drv_data *ddata)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ in->ops.dsi->disable_video_output(in, ddata->channel);
+
+ r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
+ if (!r)
+ r = dsicm_sleep_in(ddata);
+
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "error disabling panel, issuing HW reset\n");
+ dsicm_hw_reset(ddata);
+ }
+
+ in->ops.dsi->disable(in, true, false);
+
+ ddata->enabled = false;
+}
+
+static int dsicm_panel_reset(struct panel_drv_data *ddata)
+{
+ dev_err(&ddata->pdev->dev, "performing LCD reset\n");
+
+ dsicm_power_off(ddata);
+ dsicm_hw_reset(ddata);
+ return dsicm_power_on(ddata);
+}
+
+static int dsicm_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ struct device *dev = &ddata->pdev->dev;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dsi->connect(in, dssdev);
+ if (r) {
+ dev_err(dev, "Failed to connect to video source\n");
+ return r;
+ }
+
+ r = in->ops.dsi->request_vc(ddata->in, &ddata->channel);
+ if (r) {
+ dev_err(dev, "failed to get virtual channel\n");
+ goto err_req_vc;
+ }
+
+ r = in->ops.dsi->set_vc_id(ddata->in, ddata->channel, TCH);
+ if (r) {
+ dev_err(dev, "failed to set VC_ID\n");
+ goto err_vc_id;
+ }
+
+ return 0;
+
+err_vc_id:
+ in->ops.dsi->release_vc(ddata->in, ddata->channel);
+err_req_vc:
+ in->ops.dsi->disconnect(in, dssdev);
+ return r;
+}
+
+static void dsicm_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dsi->release_vc(in, ddata->channel);
+ in->ops.dsi->disconnect(in, dssdev);
+}
+
+static int dsicm_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(&ddata->pdev->dev, "enable\n");
+
+ mutex_lock(&ddata->lock);
+
+ if (!omapdss_device_is_connected(dssdev)) {
+ r = -ENODEV;
+ goto err;
+ }
+
+ if (omapdss_device_is_enabled(dssdev)) {
+ r = 0;
+ goto err;
+ }
+
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_power_on(ddata);
+
+ in->ops.dsi->bus_unlock(in);
+
+ if (r)
+ goto err;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+err:
+ dev_dbg(&ddata->pdev->dev, "enable failed\n");
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static void dsicm_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(&ddata->pdev->dev, "disable\n");
+
+ mutex_lock(&ddata->lock);
+
+ dsicm_cancel_ulps_work(ddata);
+
+ in->ops.dsi->bus_lock(in);
+
+ if (omapdss_device_is_enabled(dssdev)) {
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ dsicm_power_off(ddata);
+ }
+
+ in->ops.dsi->bus_unlock(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&ddata->lock);
+}
+
+static void dsicm_framedone_cb(int err, void *data)
+{
+ struct panel_drv_data *ddata = data;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
+ in->ops.dsi->bus_unlock(ddata->in);
+}
+
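+/*
+ * TE GPIO interrupt: if an update was armed, cancel the TE timeout work and
+ * start the DSI frame transfer.
+ */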
+static irqreturn_t dsicm_te_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+ struct omap_dss_device *in = ddata->in;
+ int old;
+ int r;
+
+ old = atomic_cmpxchg(&ddata->do_update, 1, 0);
+
+ if (old) {
+ cancel_delayed_work(&ddata->te_timeout_work);
+
+ r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ ddata);
+ if (r)
+ goto err;
+ }
+
+ return IRQ_HANDLED;
+err:
+ dev_err(&ddata->pdev->dev, "start update failed\n");
+ in->ops.dsi->bus_unlock(in);
+ return IRQ_HANDLED;
+}
+
+static void dsicm_te_timeout_work_callback(struct work_struct *work)
+{
+ struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
+ te_timeout_work.work);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
+
+ atomic_set(&ddata->do_update, 0);
+ in->ops.dsi->bus_unlock(in);
+}
+
+static int dsicm_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
+
+ mutex_lock(&ddata->lock);
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_wake_up(ddata);
+ if (r)
+ goto err;
+
+ if (!ddata->enabled) {
+ r = 0;
+ goto err;
+ }
+
+ /* XXX no need to send this every frame, but DSI breaks if not done */
+ r = dsicm_set_update_window(ddata, 0, 0,
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+ if (r)
+ goto err;
+
+ if (ddata->te_enabled && gpio_is_valid(ddata->ext_te_gpio)) {
+ schedule_delayed_work(&ddata->te_timeout_work,
+ msecs_to_jiffies(250));
+ atomic_set(&ddata->do_update, 1);
+ } else {
+ r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ ddata);
+ if (r)
+ goto err;
+ }
+
+ /* note: no bus_unlock here. unlock is in framedone_cb */
+ mutex_unlock(&ddata->lock);
+ return 0;
+err:
+ in->ops.dsi->bus_unlock(in);
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static int dsicm_sync(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->pdev->dev, "sync\n");
+
+ mutex_lock(&ddata->lock);
+ in->ops.dsi->bus_lock(in);
+ in->ops.dsi->bus_unlock(in);
+ mutex_unlock(&ddata->lock);
+
+ dev_dbg(&ddata->pdev->dev, "sync done\n");
+
+ return 0;
+}
+
+static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
+{
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (enable)
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
+ else
+ r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
+
+ if (!gpio_is_valid(ddata->ext_te_gpio))
+ in->ops.dsi->enable_te(in, enable);
+
+ /* possible panel bug */
+ msleep(100);
+
+ return r;
+}
+
+static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->te_enabled == enable)
+ goto end;
+
+ in->ops.dsi->bus_lock(in);
+
+ if (ddata->enabled) {
+ r = dsicm_wake_up(ddata);
+ if (r)
+ goto err;
+
+ r = _dsicm_enable_te(ddata, enable);
+ if (r)
+ goto err;
+ }
+
+ ddata->te_enabled = enable;
+
+ in->ops.dsi->bus_unlock(in);
+end:
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+err:
+ in->ops.dsi->bus_unlock(in);
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_get_te(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ int r;
+
+ mutex_lock(&ddata->lock);
+ r = ddata->te_enabled;
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_memory_read(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+ int first = 1;
+ int plen;
+ unsigned buf_used = 0;
+
+ if (size < w * h * 3)
+ return -ENOMEM;
+
+ mutex_lock(&ddata->lock);
+
+ if (!ddata->enabled) {
+ r = -ENODEV;
+ goto err1;
+ }
+
+ size = min(w * h * 3,
+ dssdev->panel.timings.x_res *
+ dssdev->panel.timings.y_res * 3);
+
+ in->ops.dsi->bus_lock(in);
+
+ r = dsicm_wake_up(ddata);
+ if (r)
+ goto err2;
+
+ /* plen 1 or 2 goes into short packet. until checksum error is fixed,
+ * use short packets. plen 32 works, but bigger packets seem to cause
+ * an error. */
+ if (size % 2)
+ plen = 1;
+ else
+ plen = 2;
+
+ dsicm_set_update_window(ddata, x, y, w, h);
+
+ r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen);
+ if (r)
+ goto err2;
+
+ while (buf_used < size) {
+ u8 dcs_cmd = first ? 0x2e : 0x3e;
+ first = 0;
+
+ r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd,
+ buf + buf_used, size - buf_used);
+
+ if (r < 0) {
+ dev_err(dssdev->dev, "read error\n");
+ goto err3;
+ }
+
+ buf_used += r;
+
+ if (r < plen) {
+ dev_err(&ddata->pdev->dev, "short read\n");
+ break;
+ }
+
+ if (signal_pending(current)) {
+ dev_err(&ddata->pdev->dev, "signal pending, "
+ "aborting memory read\n");
+ r = -ERESTARTSYS;
+ goto err3;
+ }
+ }
+
+ r = buf_used;
+
+err3:
+ in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1);
+err2:
+ in->ops.dsi->bus_unlock(in);
+err1:
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
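+/* Delayed work: enter ULPS once the inactivity timeout expires while enabled */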
+static void dsicm_ulps_work(struct work_struct *work)
+{
+ struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
+ ulps_work.work);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ mutex_lock(&ddata->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) {
+ mutex_unlock(&ddata->lock);
+ return;
+ }
+
+ in->ops.dsi->bus_lock(in);
+
+ dsicm_enter_ulps(ddata);
+
+ in->ops.dsi->bus_unlock(in);
+ mutex_unlock(&ddata->lock);
+}
+
+static struct omap_dss_driver dsicm_ops = {
+ .connect = dsicm_connect,
+ .disconnect = dsicm_disconnect,
+
+ .enable = dsicm_enable,
+ .disable = dsicm_disable,
+
+ .update = dsicm_update,
+ .sync = dsicm_sync,
+
+ .get_resolution = dsicm_get_resolution,
+ .get_recommended_bpp = omapdss_default_get_recommended_bpp,
+
+ .enable_te = dsicm_enable_te,
+ .get_te = dsicm_get_te,
+
+ .memory_read = dsicm_memory_read,
+};
+
+static int dsicm_probe_pdata(struct platform_device *pdev)
+{
+ const struct panel_dsicm_platform_data *pdata;
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return -EPROBE_DEFER;
+ }
+ ddata->in = in;
+
+ ddata->reset_gpio = pdata->reset_gpio;
+
+ if (pdata->use_ext_te)
+ ddata->ext_te_gpio = pdata->ext_te_gpio;
+ else
+ ddata->ext_te_gpio = -1;
+
+ ddata->ulps_timeout = pdata->ulps_timeout;
+
+ ddata->use_dsi_backlight = pdata->use_dsi_backlight;
+
+ ddata->pin_config = pdata->pin_config;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int dsicm_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct panel_drv_data *ddata;
+ struct backlight_device *bldev = NULL;
+ struct device *dev = &pdev->dev;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ dev_dbg(dev, "probe\n");
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+ ddata->pdev = pdev;
+
+ if (dev_get_platdata(dev)) {
+ r = dsicm_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->timings.x_res = 864;
+ ddata->timings.y_res = 480;
+ ddata->timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = dev;
+ dssdev->driver = &dsicm_ops;
+ dssdev->panel.timings = ddata->timings;
+ dssdev->type = OMAP_DISPLAY_TYPE_DSI;
+ dssdev->owner = THIS_MODULE;
+
+ dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
+ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ mutex_init(&ddata->lock);
+
+ atomic_set(&ddata->do_update, 0);
+
+ if (gpio_is_valid(ddata->reset_gpio)) {
+ r = devm_gpio_request_one(dev, ddata->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "taal rst");
+ if (r) {
+ dev_err(dev, "failed to request reset gpio\n");
+ return r;
+ }
+ }
+
+ if (gpio_is_valid(ddata->ext_te_gpio)) {
+ r = devm_gpio_request_one(dev, ddata->ext_te_gpio,
+ GPIOF_IN, "taal irq");
+ if (r) {
+ dev_err(dev, "GPIO request failed\n");
+ return r;
+ }
+
+ r = devm_request_irq(dev, gpio_to_irq(ddata->ext_te_gpio),
+ dsicm_te_isr,
+ IRQF_TRIGGER_RISING,
+ "taal vsync", ddata);
+
+ if (r) {
+ dev_err(dev, "IRQ request failed\n");
+ return r;
+ }
+
+ INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
+ dsicm_te_timeout_work_callback);
+
+ dev_dbg(dev, "Using GPIO TE\n");
+ }
+
+ ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
+ if (ddata->workqueue == NULL) {
+ dev_err(dev, "can't create workqueue\n");
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
+
+ dsicm_hw_reset(ddata);
+
+ if (ddata->use_dsi_backlight) {
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 255;
+
+ props.type = BACKLIGHT_RAW;
+ bldev = backlight_device_register(dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_bl;
+ }
+
+ ddata->bldev = bldev;
+
+ bldev->props.fb_blank = FB_BLANK_UNBLANK;
+ bldev->props.power = FB_BLANK_UNBLANK;
+ bldev->props.brightness = 255;
+
+ dsicm_bl_update_status(bldev);
+ }
+
+ r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
+ if (r) {
+ dev_err(dev, "failed to create sysfs files\n");
+ goto err_sysfs_create;
+ }
+
+ return 0;
+
+err_sysfs_create:
+ if (bldev != NULL)
+ backlight_device_unregister(bldev);
+err_bl:
+ destroy_workqueue(ddata->workqueue);
+err_reg:
+ return r;
+}
+
+static int __exit dsicm_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct backlight_device *bldev;
+
+ dev_dbg(&pdev->dev, "remove\n");
+
+ omapdss_unregister_display(dssdev);
+
+ dsicm_disable(dssdev);
+ dsicm_disconnect(dssdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
+
+ bldev = ddata->bldev;
+ if (bldev != NULL) {
+ bldev->props.power = FB_BLANK_POWERDOWN;
+ dsicm_bl_update_status(bldev);
+ backlight_device_unregister(bldev);
+ }
+
+ omap_dss_put_device(ddata->in);
+
+ dsicm_cancel_ulps_work(ddata);
+ destroy_workqueue(ddata->workqueue);
+
+ /* reset, to be sure that the panel is in a valid state */
+ dsicm_hw_reset(ddata);
+
+ return 0;
+}
+
+static struct platform_driver dsicm_driver = {
+ .probe = dsicm_probe,
+ .remove = __exit_p(dsicm_remove),
+ .driver = {
+ .name = "panel-dsi-cm",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(dsicm_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * LG.Philips LB035Q02 LCD Panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ * Based on a driver by: Steve Sakoman <steve@sakoman.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+static struct omap_video_timings lb035q02_timings = {
+ .x_res = 320,
+ .y_res = 240,
+
+ .pixel_clock = 6500,
+
+ .hsw = 2,
+ .hfp = 20,
+ .hbp = 68,
+
+ .vsw = 2,
+ .vfp = 4,
+ .vbp = 18,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct spi_device *spi;
+
+ int data_lines;
+
+ struct omap_video_timings videomode;
+
+ int reset_gpio;
+ int backlight_gpio;
+ int enable_gpio;
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
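+/*
+ * Each register write is two 3-byte SPI transfers: a register-index transfer
+ * followed by a register-value transfer, with chip select toggled in between.
+ */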
+static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
+{
+ struct spi_message msg;
+ struct spi_transfer index_xfer = {
+ .len = 3,
+ .cs_change = 1,
+ };
+ struct spi_transfer value_xfer = {
+ .len = 3,
+ };
+ u8 buffer[16];
+
+ spi_message_init(&msg);
+
+ /* register index */
+ buffer[0] = 0x70;
+ buffer[1] = 0x00;
+ buffer[2] = reg & 0x7f;
+ index_xfer.tx_buf = buffer;
+ spi_message_add_tail(&index_xfer, &msg);
+
+ /* register value */
+ buffer[4] = 0x72;
+ buffer[5] = val >> 8;
+ buffer[6] = val;
+ value_xfer.tx_buf = buffer + 4;
+ spi_message_add_tail(&value_xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static void init_lb035q02_panel(struct spi_device *spi)
+{
+ /* Init sequence from page 28 of the lb035q02 spec */
+ lb035q02_write_reg(spi, 0x01, 0x6300);
+ lb035q02_write_reg(spi, 0x02, 0x0200);
+ lb035q02_write_reg(spi, 0x03, 0x0177);
+ lb035q02_write_reg(spi, 0x04, 0x04c7);
+ lb035q02_write_reg(spi, 0x05, 0xffc0);
+ lb035q02_write_reg(spi, 0x06, 0xe806);
+ lb035q02_write_reg(spi, 0x0a, 0x4008);
+ lb035q02_write_reg(spi, 0x0b, 0x0000);
+ lb035q02_write_reg(spi, 0x0d, 0x0030);
+ lb035q02_write_reg(spi, 0x0e, 0x2800);
+ lb035q02_write_reg(spi, 0x0f, 0x0000);
+ lb035q02_write_reg(spi, 0x16, 0x9f80);
+ lb035q02_write_reg(spi, 0x17, 0x0a0f);
+ lb035q02_write_reg(spi, 0x1e, 0x00c1);
+ lb035q02_write_reg(spi, 0x30, 0x0300);
+ lb035q02_write_reg(spi, 0x31, 0x0007);
+ lb035q02_write_reg(spi, 0x32, 0x0000);
+ lb035q02_write_reg(spi, 0x33, 0x0000);
+ lb035q02_write_reg(spi, 0x34, 0x0707);
+ lb035q02_write_reg(spi, 0x35, 0x0004);
+ lb035q02_write_reg(spi, 0x36, 0x0302);
+ lb035q02_write_reg(spi, 0x37, 0x0202);
+ lb035q02_write_reg(spi, 0x3a, 0x0a0d);
+ lb035q02_write_reg(spi, 0x3b, 0x0806);
+}
+
+static int lb035q02_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ init_lb035q02_panel(ddata->spi);
+
+ return 0;
+}
+
+static void lb035q02_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int lb035q02_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ if (gpio_is_valid(ddata->enable_gpio))
+ gpio_set_value_cansleep(ddata->enable_gpio, 1);
+
+ if (gpio_is_valid(ddata->backlight_gpio))
+ gpio_set_value_cansleep(ddata->backlight_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void lb035q02_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (gpio_is_valid(ddata->enable_gpio))
+ gpio_set_value_cansleep(ddata->enable_gpio, 0);
+
+ if (gpio_is_valid(ddata->backlight_gpio))
+ gpio_set_value_cansleep(ddata->backlight_gpio, 0);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void lb035q02_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void lb035q02_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int lb035q02_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver lb035q02_ops = {
+ .connect = lb035q02_connect,
+ .disconnect = lb035q02_disconnect,
+
+ .enable = lb035q02_enable,
+ .disable = lb035q02_disable,
+
+ .set_timings = lb035q02_set_timings,
+ .get_timings = lb035q02_get_timings,
+ .check_timings = lb035q02_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+static int lb035q02_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_lb035q02_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ ddata->enable_gpio = pdata->enable_gpio;
+ ddata->backlight_gpio = pdata->backlight_gpio;
+
+ return 0;
+}
+
+static int lb035q02_panel_spi_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi = spi;
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = lb035q02_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->enable_gpio)) {
+ r = devm_gpio_request_one(&spi->dev, ddata->enable_gpio,
+ GPIOF_OUT_INIT_LOW, "panel enable");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->backlight_gpio)) {
+ r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
+ GPIOF_OUT_INIT_LOW, "panel backlight");
+ if (r)
+ goto err_gpio;
+ }
+
+ ddata->videomode = lb035q02_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &lb035q02_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int lb035q02_panel_spi_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(dssdev);
+
+ lb035q02_disable(dssdev);
+ lb035q02_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct spi_driver lb035q02_spi_driver = {
+ .probe = lb035q02_panel_spi_probe,
+ .remove = lb035q02_panel_spi_remove,
+ .driver = {
+ .name = "panel_lgphilips_lb035q02",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(lb035q02_spi_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * NEC NL8048HL11 Panel driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct omap_video_timings videomode;
+
+ int data_lines;
+
+ int res_gpio;
+ int qvga_gpio;
+
+ struct spi_device *spi;
+};
+
+#define LCD_XRES 800
+#define LCD_YRES 480
+/*
+ * NEC PIX Clock Ratings
+ * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
+ */
+#define LCD_PIXEL_CLOCK 23800
+
+static const struct {
+ unsigned char addr;
+ unsigned char dat;
+} nec_8048_init_seq[] = {
+ { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
+ { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
+ { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
+ { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
+ { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
+ { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
+ { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
+ { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+ { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
+ { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
+ { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
+ { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
+ { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
+ { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
+ { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
+ { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
+ { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
+ { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
+};
+
+static const struct omap_video_timings nec_8048_panel_timings = {
+ .x_res = LCD_XRES,
+ .y_res = LCD_YRES,
+ .pixel_clock = LCD_PIXEL_CLOCK,
+ .hfp = 6,
+ .hsw = 1,
+ .hbp = 4,
+ .vfp = 3,
+ .vsw = 1,
+ .vbp = 4,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
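+/*
+ * Each register access is sent as one 32-bit SPI word: the register-address
+ * command in the upper 16 bits and the register-data command in the lower.
+ */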
+static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
+ unsigned char reg_data)
+{
+ int ret = 0;
+ unsigned int cmd = 0, data = 0;
+
+ cmd = 0x0000 | reg_addr; /* register address write */
+ data = 0x0100 | reg_data; /* register data write */
+ data = (cmd << 16) | data;
+
+ ret = spi_write(spi, (unsigned char *)&data, 4);
+ if (ret)
+ pr_err("error in spi_write %x\n", data);
+
+ return ret;
+}
+
+static int init_nec_8048_wvga_lcd(struct spi_device *spi)
+{
+ unsigned int i;
+ /* Initialization Sequence */
+ /* nec_8048_spi_send(spi, REG, VAL) */
+ for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
+ nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+ nec_8048_init_seq[i].dat);
+ udelay(20);
+ nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+ nec_8048_init_seq[i].dat);
+ return 0;
+}
+
+static int nec_8048_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void nec_8048_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int nec_8048_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ if (gpio_is_valid(ddata->res_gpio))
+ gpio_set_value_cansleep(ddata->res_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void nec_8048_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (gpio_is_valid(ddata->res_gpio))
+ gpio_set_value_cansleep(ddata->res_gpio, 0);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void nec_8048_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void nec_8048_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int nec_8048_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver nec_8048_ops = {
+ .connect = nec_8048_connect,
+ .disconnect = nec_8048_disconnect,
+
+ .enable = nec_8048_enable,
+ .disable = nec_8048_disable,
+
+ .set_timings = nec_8048_set_timings,
+ .get_timings = nec_8048_get_timings,
+ .check_timings = nec_8048_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+
+static int nec_8048_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_nec_nl8048hl11_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ ddata->qvga_gpio = pdata->qvga_gpio;
+ ddata->res_gpio = pdata->res_gpio;
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int nec_8048_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 32;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup failed: %d\n", r);
+ return r;
+ }
+
+ init_nec_8048_wvga_lcd(spi);
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi = spi;
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = nec_8048_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->qvga_gpio)) {
+ r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd QVGA");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->res_gpio)) {
+ r = devm_gpio_request_one(&spi->dev, ddata->res_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd RES");
+ if (r)
+ goto err_gpio;
+ }
+
+ ddata->videomode = nec_8048_panel_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &nec_8048_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int nec_8048_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ omapdss_unregister_display(dssdev);
+
+ nec_8048_disable(dssdev);
+ nec_8048_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int nec_8048_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ nec_8048_spi_send(spi, 2, 0x01);
+ mdelay(40);
+
+ return 0;
+}
+
+static int nec_8048_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ /* reinitialize the panel */
+ spi_setup(spi);
+ nec_8048_spi_send(spi, 2, 0x00);
+ init_nec_8048_wvga_lcd(spi);
+
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend,
+ nec_8048_resume);
+#define NEC_8048_PM_OPS (&nec_8048_pm_ops)
+#else
+#define NEC_8048_PM_OPS NULL
+#endif
+
+static struct spi_driver nec_8048_driver = {
+ .driver = {
+ .name = "panel-nec-nl8048hl11",
+ .owner = THIS_MODULE,
+ .pm = NEC_8048_PM_OPS,
+ },
+ .probe = nec_8048_probe,
+ .remove = nec_8048_remove,
+};
+
+module_spi_driver(nec_8048_driver);
+
+MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
+MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * LCD panel driver for Sharp LS037V7DW01
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int data_lines;
+
+ struct omap_video_timings videomode;
+
+ int resb_gpio;
+ int ini_gpio;
+ int mo_gpio;
+ int lr_gpio;
+ int ud_gpio;
+};
+
+static const struct omap_video_timings sharp_ls_timings = {
+ .x_res = 480,
+ .y_res = 640,
+
+ .pixel_clock = 19200,
+
+ .hsw = 2,
+ .hfp = 1,
+ .hbp = 28,
+
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int sharp_ls_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int sharp_ls_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ /* wait a couple of vsyncs before enabling the LCD */
+ msleep(50);
+
+ if (gpio_is_valid(ddata->resb_gpio))
+ gpio_set_value_cansleep(ddata->resb_gpio, 1);
+
+ if (gpio_is_valid(ddata->ini_gpio))
+ gpio_set_value_cansleep(ddata->ini_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void sharp_ls_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (gpio_is_valid(ddata->ini_gpio))
+ gpio_set_value_cansleep(ddata->ini_gpio, 0);
+
+ if (gpio_is_valid(ddata->resb_gpio))
+ gpio_set_value_cansleep(ddata->resb_gpio, 0);
+
+ /* wait at least 5 vsyncs after disabling the LCD */
+ msleep(100);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver sharp_ls_ops = {
+ .connect = sharp_ls_connect,
+ .disconnect = sharp_ls_disconnect,
+
+ .enable = sharp_ls_enable,
+ .disable = sharp_ls_disable,
+
+ .set_timings = sharp_ls_set_timings,
+ .get_timings = sharp_ls_get_timings,
+ .check_timings = sharp_ls_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+static int sharp_ls_probe_pdata(struct platform_device *pdev)
+{
+ const struct panel_sharp_ls037v7dw01_platform_data *pdata;
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&pdev->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ ddata->resb_gpio = pdata->resb_gpio;
+ ddata->ini_gpio = pdata->ini_gpio;
+ ddata->mo_gpio = pdata->mo_gpio;
+ ddata->lr_gpio = pdata->lr_gpio;
+ ddata->ud_gpio = pdata->ud_gpio;
+
+ return 0;
+}
+
+static int sharp_ls_probe(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ r = sharp_ls_probe_pdata(pdev);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->mo_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->mo_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd MO");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->lr_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->lr_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd LR");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->ud_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->ud_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd UD");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->resb_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->resb_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd RESB");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->ini_gpio)) {
+ r = devm_gpio_request_one(&pdev->dev, ddata->ini_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd INI");
+ if (r)
+ goto err_gpio;
+ }
+
+ ddata->videomode = sharp_ls_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &pdev->dev;
+ dssdev->driver = &sharp_ls_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit sharp_ls_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_display(dssdev);
+
+ sharp_ls_disable(dssdev);
+ sharp_ls_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct platform_driver sharp_ls_driver = {
+ .probe = sharp_ls_probe,
+ .remove = __exit_p(sharp_ls_remove),
+ .driver = {
+ .name = "panel-sharp-ls037v7dw01",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(sharp_ls_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Sony ACX565AKM LCD Panel driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+#define MIPID_CMD_READ_DISP_ID 0x04
+#define MIPID_CMD_READ_RED 0x06
+#define MIPID_CMD_READ_GREEN 0x07
+#define MIPID_CMD_READ_BLUE 0x08
+#define MIPID_CMD_READ_DISP_STATUS 0x09
+#define MIPID_CMD_RDDSDR 0x0F
+#define MIPID_CMD_SLEEP_IN 0x10
+#define MIPID_CMD_SLEEP_OUT 0x11
+#define MIPID_CMD_DISP_OFF 0x28
+#define MIPID_CMD_DISP_ON 0x29
+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
+#define MIPID_CMD_WRITE_CTRL_DISP 0x53
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
+#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
+
+#define MIPID_CMD_READ_CTRL_DISP 0x54
+#define MIPID_CMD_WRITE_CABC 0x55
+#define MIPID_CMD_READ_CABC 0x56
+
+#define MIPID_VER_LPH8923 3
+#define MIPID_VER_LS041Y3 4
+#define MIPID_VER_L4F00311 8
+#define MIPID_VER_ACX565AKM 9
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int reset_gpio;
+ int datapairs;
+
+ struct omap_video_timings videomode;
+
+ char *name;
+ int enabled;
+ int model;
+ int revision;
+ u8 display_id[3];
+ unsigned has_bc:1;
+ unsigned has_cabc:1;
+ unsigned cabc_mode;
+ unsigned long hw_guard_end; /* next value of jiffies
+ when we can issue the
+ next sleep in/out command */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ struct spi_device *spi;
+ struct mutex mutex;
+
+ struct backlight_device *bl_dev;
+};
+
+static const struct omap_video_timings acx565akm_panel_timings = {
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 24000,
+ .hfp = 28,
+ .hsw = 4,
+ .hbp = 24,
+ .vfp = 3,
+ .vsw = 3,
+ .vbp = 4,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd,
+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[5];
+ int r;
+
+ BUG_ON(ddata->spi == NULL);
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
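+ /*
+ * The command goes out as a single 9-bit word: bit 8 (the data/command
+ * bit) is left clear for commands, and a 9-bit word occupies two bytes
+ * in the buffer, hence len = 2.
+ */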
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ if (rlen > 1 && wlen == 0) {
+ /*
+ * Between the command and the response data there is a
+ * dummy clock cycle. Add an extra bit after the command
+ * word to account for this.
+ */
+ x->bits_per_word = 10;
+ cmd <<= 1;
+ }
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = rbuf;
+ x->len = rlen;
+ spi_message_add_tail(x, &m);
+ }
+
+ r = spi_sync(ddata->spi, &m);
+ if (r < 0)
+ dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r);
+}
+
+static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd)
+{
+ acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct panel_drv_data *ddata,
+ int reg, const u8 *buf, int len)
+{
+ acx565akm_transfer(ddata, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct panel_drv_data *ddata,
+ int reg, u8 *buf, int len)
+{
+ acx565akm_transfer(ddata, reg, NULL, 0, buf, len);
+}
+
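+/*
+ * The panel needs a minimum delay between certain commands (e.g. 120 ms
+ * between sleep in and sleep out). hw_guard_start() records when that
+ * window ends and hw_guard_wait() sleeps until it has passed.
+ */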
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+ ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+ unsigned long wait = ddata->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static void set_sleep_mode(struct panel_drv_data *ddata, int on)
+{
+ int cmd;
+
+ if (on)
+ cmd = MIPID_CMD_SLEEP_IN;
+ else
+ cmd = MIPID_CMD_SLEEP_OUT;
+ /*
+ * We have to keep 120msec between sleep in/out commands.
+ * (8.2.15, 8.2.16).
+ */
+ hw_guard_wait(ddata);
+ acx565akm_cmd(ddata, cmd);
+ hw_guard_start(ddata, 120);
+}
+
+static void set_display_state(struct panel_drv_data *ddata, int enabled)
+{
+ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+ acx565akm_cmd(ddata, cmd);
+}
+
+static int panel_enabled(struct panel_drv_data *ddata)
+{
+ u32 disp_status;
+ int enabled;
+
+ acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS,
+ (u8 *)&disp_status, 4);
+ disp_status = __be32_to_cpu(disp_status);
+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+ dev_dbg(&ddata->spi->dev,
+ "LCD panel %senabled by bootloader (status 0x%04x)\n",
+ enabled ? "" : "not ", disp_status);
+ return enabled;
+}
+
+static int panel_detect(struct panel_drv_data *ddata)
+{
+ acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3);
+ dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ ddata->display_id[0],
+ ddata->display_id[1],
+ ddata->display_id[2]);
+
+ switch (ddata->display_id[0]) {
+ case 0x10:
+ ddata->model = MIPID_VER_ACX565AKM;
+ ddata->name = "acx565akm";
+ ddata->has_bc = 1;
+ ddata->has_cabc = 1;
+ break;
+ case 0x29:
+ ddata->model = MIPID_VER_L4F00311;
+ ddata->name = "l4f00311";
+ break;
+ case 0x45:
+ ddata->model = MIPID_VER_LPH8923;
+ ddata->name = "lph8923";
+ break;
+ case 0x83:
+ ddata->model = MIPID_VER_LS041Y3;
+ ddata->name = "ls041y3";
+ break;
+ default:
+ ddata->name = "unknown";
+ dev_err(&ddata->spi->dev, "invalid display ID\n");
+ return -ENODEV;
+ }
+
+ ddata->revision = ddata->display_id[1];
+
+ dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n",
+ ddata->name, ddata->revision);
+
+ return 0;
+}
+
+/*----------------------Backlight Control-------------------------*/
+
+static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
+{
+ u16 ctrl;
+
+ acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
+ if (enable) {
+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON;
+ } else {
+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON);
+ }
+
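+ /* bit 8 marks this as a data/parameter word in the 9-bit SPI protocol */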
+ ctrl |= 1 << 8;
+ acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
+}
+
+static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode)
+{
+ u16 cabc_ctrl;
+
+ ddata->cabc_mode = mode;
+ if (!ddata->enabled)
+ return;
+ cabc_ctrl = 0;
+ acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+ cabc_ctrl &= ~3;
+ cabc_ctrl |= (1 << 8) | (mode & 3);
+ acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned get_cabc_mode(struct panel_drv_data *ddata)
+{
+ return ddata->cabc_mode;
+}
+
+static unsigned get_hw_cabc_mode(struct panel_drv_data *ddata)
+{
+ u8 cabc_ctrl;
+
+ acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+ return cabc_ctrl & 3;
+}
+
+static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level)
+{
+ int bv;
+
+ bv = level | (1 << 8);
+ acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
+
+ if (level)
+ enable_backlight_ctrl(ddata, 1);
+ else
+ enable_backlight_ctrl(ddata, 0);
+}
+
+static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
+{
+ u8 bv;
+
+ acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
+
+ return bv;
+}
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r;
+ int level;
+
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ mutex_lock(&ddata->mutex);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ r = 0;
+ if (ddata->has_bc)
+ acx565akm_set_brightness(ddata, level);
+ else
+ r = -ENODEV;
+
+ mutex_unlock(&ddata->mutex);
+
+ return r;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ if (!ddata->has_bc)
+ return -ENODEV;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK) {
+ if (ddata->has_bc)
+ return acx565akm_get_actual_brightness(ddata);
+ else
+ return dev->props.brightness;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+ .get_brightness = acx565akm_bl_get_intensity,
+ .update_status = acx565akm_bl_update_status,
+};
+
+/*--------------------Auto Brightness control via Sysfs---------------------*/
+
+static const char * const cabc_modes[] = {
+ "off", /* always used when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ const char *mode_str;
+ int mode;
+ int len;
+
+ if (!ddata->has_cabc)
+ mode = 0;
+ else
+ mode = get_cabc_mode(ddata);
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+ mode_str = cabc_modes[mode];
+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+ const char *mode_str = cabc_modes[i];
+ int cmp_len = strlen(mode_str);
+
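+ /* ignore a trailing newline when comparing against the mode name */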
+ if (count > 0 && buf[count - 1] == '\n')
+ count--;
+ if (count != cmp_len)
+ continue;
+
+ if (strncmp(buf, mode_str, cmp_len) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cabc_modes))
+ return -EINVAL;
+
+ if (!ddata->has_cabc && i != 0)
+ return -EINVAL;
+
+ mutex_lock(&ddata->mutex);
+ set_cabc_mode(ddata, i);
+ mutex_unlock(&ddata->mutex);
+
+ return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ int len;
+ int i;
+
+ if (!ddata->has_cabc)
+ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+
+ for (i = 0, len = 0;
+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+ i ? " " : "", cabc_modes[i],
+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+ show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+ show_cabc_available_modes, NULL);
+
+static struct attribute *bldev_attrs[] = {
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static struct attribute_group bldev_attr_group = {
+ .attrs = bldev_attrs,
+};
+
+static int acx565akm_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.sdi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void acx565akm_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.sdi->disconnect(in, dssdev);
+}
+
+static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_datapairs(in, ddata->datapairs);
+
+ r = in->ops.sdi->enable(in);
+ if (r) {
+ pr_err("%s sdi enable failed\n", __func__);
+ return r;
+ }
+
+ /* FIXME: tweak me */
+ msleep(50);
+
+ if (gpio_is_valid(ddata->reset_gpio))
+ gpio_set_value(ddata->reset_gpio, 1);
+
+ if (ddata->enabled) {
+ dev_dbg(&ddata->spi->dev, "panel already enabled\n");
+ return 0;
+ }
+
+ /*
+ * We have to meet all the following delay requirements:
+ * 1. tRW: reset pulse width 10usec (7.12.1)
+ * 2. tRT: reset cancel time 5msec (7.12.1)
+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+ * case (7.6.2)
+ * 4. 120msec before the sleep out command (7.12.1)
+ */
+ msleep(120);
+
+ set_sleep_mode(ddata, 0);
+ ddata->enabled = 1;
+
+ /* 5msec between sleep out and the next command. (8.2.16) */
+ usleep_range(5000, 10000);
+ set_display_state(ddata, 1);
+ set_cabc_mode(ddata, ddata->cabc_mode);
+
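+ /*
+ * acx565akm_bl_update_status() takes the mutex itself, so release it
+ * before updating the backlight.
+ */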
+ mutex_unlock(&ddata->mutex);
+
+ return acx565akm_bl_update_status(ddata->bl_dev);
+}
+
+static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(dssdev->dev, "%s\n", __func__);
+
+ if (!ddata->enabled)
+ return;
+
+ set_display_state(ddata, 0);
+ set_sleep_mode(ddata, 1);
+ ddata->enabled = 0;
+ /*
+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+ * ~50msec) after sending the sleep in command and asserting the
+ * reset signal. We probably could assert the reset w/o the delay
+ * but we still delay to avoid possible artifacts. (7.6.1)
+ */
+ msleep(50);
+
+ if (gpio_is_valid(ddata->reset_gpio))
+ gpio_set_value(ddata->reset_gpio, 0);
+
+ /* FIXME need to tweak this delay */
+ msleep(100);
+
+ in->ops.sdi->disable(in);
+}
+
+static int acx565akm_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ int r;
+
+ dev_dbg(dssdev->dev, "%s\n", __func__);
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ mutex_lock(&ddata->mutex);
+ r = acx565akm_panel_power_on(dssdev);
+ mutex_unlock(&ddata->mutex);
+
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void acx565akm_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ dev_dbg(dssdev->dev, "%s\n", __func__);
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ mutex_lock(&ddata->mutex);
+ acx565akm_panel_power_off(dssdev);
+ mutex_unlock(&ddata->mutex);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void acx565akm_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.sdi->set_timings(in, timings);
+}
+
+static void acx565akm_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int acx565akm_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.sdi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver acx565akm_ops = {
+ .connect = acx565akm_connect,
+ .disconnect = acx565akm_disconnect,
+
+ .enable = acx565akm_enable,
+ .disable = acx565akm_disable,
+
+ .set_timings = acx565akm_set_timings,
+ .get_timings = acx565akm_get_timings,
+ .check_timings = acx565akm_check_timings,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+static int acx565akm_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_acx565akm_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ ddata->reset_gpio = pdata->reset_gpio;
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+ ddata->in = in;
+
+ ddata->datapairs = pdata->datapairs;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int acx565akm_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ struct backlight_device *bldev;
+ int max_brightness, brightness;
+ struct backlight_properties props;
+ int r;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->mode = SPI_MODE_3;
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi = spi;
+
+ mutex_init(&ddata->mutex);
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = acx565akm_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ if (gpio_is_valid(ddata->reset_gpio)) {
+ r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd reset");
+ if (r)
+ goto err_gpio;
+ }
+
+ if (gpio_is_valid(ddata->reset_gpio))
+ gpio_set_value(ddata->reset_gpio, 1);
+
+ /*
+ * After reset we have to wait 5 msec before the first
+ * command can be sent.
+ */
+ usleep_range(5000, 10000);
+
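+ /* check whether the bootloader left the panel enabled */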
+ ddata->enabled = panel_enabled(ddata);
+
+ r = panel_detect(ddata);
+
+ if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio))
+ gpio_set_value(ddata->reset_gpio, 0);
+
+ if (r) {
+ dev_err(&spi->dev, "%s panel detect error\n", __func__);
+ goto err_detect;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.fb_blank = FB_BLANK_UNBLANK;
+ props.power = FB_BLANK_UNBLANK;
+ props.type = BACKLIGHT_RAW;
+
+ bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
+ ddata, &acx565akm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_detect;
+ }
+ ddata->bl_dev = bldev;
+ if (ddata->has_cabc) {
+ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
+ if (r) {
+ dev_err(&bldev->dev,
+ "%s failed to create sysfs files\n", __func__);
+ goto err_sysfs;
+ }
+ ddata->cabc_mode = get_hw_cabc_mode(ddata);
+ }
+
+ max_brightness = 255;
+
+ if (ddata->has_bc)
+ brightness = acx565akm_get_actual_brightness(ddata);
+ else
+ brightness = 0;
+
+ bldev->props.max_brightness = max_brightness;
+ bldev->props.brightness = brightness;
+
+ acx565akm_bl_update_status(bldev);
+
+ ddata->videomode = acx565akm_panel_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &acx565akm_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_SDI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group);
+err_sysfs:
+ backlight_device_unregister(bldev);
+err_detect:
+err_gpio:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int acx565akm_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
+ backlight_device_unregister(ddata->bl_dev);
+
+ omapdss_unregister_display(dssdev);
+
+ acx565akm_disable(dssdev);
+ acx565akm_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct spi_driver acx565akm_driver = {
+ .driver = {
+ .name = "acx565akm",
+ .owner = THIS_MODULE,
+ },
+ .probe = acx565akm_probe,
+ .remove = acx565akm_remove,
+};
+
+module_spi_driver(acx565akm_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("acx565akm LCD Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * TPO TD043MTEA1 Panel driver
+ *
+ * Author: Gražvydas Ignotas <notasas@gmail.com>
+ * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+#define TPO_R02_MODE(x) ((x) & 7)
+#define TPO_R02_MODE_800x480 7
+#define TPO_R02_NCLK_RISING BIT(3)
+#define TPO_R02_HSYNC_HIGH BIT(4)
+#define TPO_R02_VSYNC_HIGH BIT(5)
+
+#define TPO_R03_NSTANDBY BIT(0)
+#define TPO_R03_EN_CP_CLK BIT(1)
+#define TPO_R03_EN_VGL_PUMP BIT(2)
+#define TPO_R03_EN_PWM BIT(3)
+#define TPO_R03_DRIVING_CAP_100 BIT(4)
+#define TPO_R03_EN_PRE_CHARGE BIT(6)
+#define TPO_R03_SOFTWARE_CTL BIT(7)
+
+#define TPO_R04_NFLIP_H BIT(0)
+#define TPO_R04_NFLIP_V BIT(1)
+#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
+#define TPO_R04_VGL_FREQ_1H BIT(4)
+
+#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
+ TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
+ TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
+ TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
+
+static const u16 tpo_td043_def_gamma[12] = {
+ 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
+};
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct omap_video_timings videomode;
+
+ int data_lines;
+
+ struct spi_device *spi;
+ struct regulator *vcc_reg;
+ int nreset_gpio;
+ u16 gamma[12];
+ u32 mode;
+ u32 hmirror:1;
+ u32 vmirror:1;
+ u32 powered_on:1;
+ u32 spi_suspended:1;
+ u32 power_on_resume:1;
+};
+
+static const struct omap_video_timings tpo_td043_timings = {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 36000,
+
+ .hsw = 1,
+ .hfp = 68,
+ .hbp = 214,
+
+ .vsw = 1,
+ .vfp = 39,
+ .vbp = 34,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
+{
+ struct spi_message m;
+ struct spi_transfer xfer;
+ u16 w;
+ int r;
+
+ spi_message_init(&m);
+
+ memset(&xfer, 0, sizeof(xfer));
+
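+ /* frame layout: register address in bits 15:10, bit 8 set, data in bits 7:0 */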
+ w = ((u16)addr << 10) | (1 << 8) | data;
+ xfer.tx_buf = &w;
+ xfer.bits_per_word = 16;
+ xfer.len = 2;
+ spi_message_add_tail(&xfer, &m);
+
+ r = spi_sync(spi, &m);
+ if (r < 0)
+ dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
+ return r;
+}
+
+static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
+{
+ u8 i, val;
+
+ /* gamma bits [9:8] */
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x11, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x12, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x13, val);
+
+ /* gamma bits [7:0] */
+ for (val = i = 0; i < 12; i++)
+ tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
+}
+
+static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
+{
+ u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
+ TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
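+
+ /* NFLIP_* set disables flipping; clear a bit to mirror along that axis */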
+ if (h)
+ reg4 &= ~TPO_R04_NFLIP_H;
+ if (v)
+ reg4 &= ~TPO_R04_NFLIP_V;
+
+ return tpo_td043_write(spi, 4, reg4);
+}
+
+static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
+
+ ddata->hmirror = enable;
+ return tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
+ ddata->vmirror);
+}
+
+static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
+
+ return ddata->hmirror;
+}
+
+static ssize_t tpo_td043_vmirror_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
+}
+
+static ssize_t tpo_td043_vmirror_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = !!val;
+
+ ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val);
+ if (ret < 0)
+ return ret;
+
+ ddata->vmirror = val;
+
+ return count;
+}
+
+static ssize_t tpo_td043_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
+}
+
+static ssize_t tpo_td043_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+ if (ret != 0 || val & ~7)
+ return -EINVAL;
+
+ ddata->mode = val;
+
+ val |= TPO_R02_NCLK_RISING;
+ tpo_td043_write(ddata->spi, 2, val);
+
+ return count;
+}
+
+static ssize_t tpo_td043_gamma_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ ssize_t len = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
+ ddata->gamma[i]);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t tpo_td043_gamma_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ unsigned int g[12];
+ int ret;
+ int i;
+
+ ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
+ &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
+ &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
+
+ if (ret != 12)
+ return -EINVAL;
+
+ for (i = 0; i < 12; i++)
+ ddata->gamma[i] = g[i];
+
+ tpo_td043_write_gamma(ddata->spi, ddata->gamma);
+
+ return count;
+}
+
+static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
+ tpo_td043_vmirror_show, tpo_td043_vmirror_store);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ tpo_td043_mode_show, tpo_td043_mode_store);
+static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
+ tpo_td043_gamma_show, tpo_td043_gamma_store);
+
+static struct attribute *tpo_td043_attrs[] = {
+ &dev_attr_vmirror.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_gamma.attr,
+ NULL,
+};
+
+static struct attribute_group tpo_td043_attr_group = {
+ .attrs = tpo_td043_attrs,
+};
+
+static int tpo_td043_power_on(struct panel_drv_data *ddata)
+{
+ int r;
+
+ if (ddata->powered_on)
+ return 0;
+
+ r = regulator_enable(ddata->vcc_reg);
+ if (r != 0)
+ return r;
+
+ /* wait for panel to stabilize */
+ msleep(160);
+
+ if (gpio_is_valid(ddata->nreset_gpio))
+ gpio_set_value(ddata->nreset_gpio, 1);
+
+ tpo_td043_write(ddata->spi, 2,
+ TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
+ tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
+ tpo_td043_write(ddata->spi, 0x20, 0xf0);
+ tpo_td043_write(ddata->spi, 0x21, 0xf0);
+ tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
+ ddata->vmirror);
+ tpo_td043_write_gamma(ddata->spi, ddata->gamma);
+
+ ddata->powered_on = 1;
+ return 0;
+}
+
+static void tpo_td043_power_off(struct panel_drv_data *ddata)
+{
+ if (!ddata->powered_on)
+ return;
+
+ tpo_td043_write(ddata->spi, 3,
+ TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
+
+ if (gpio_is_valid(ddata->nreset_gpio))
+ gpio_set_value(ddata->nreset_gpio, 0);
+
+ /* wait for at least 2 vsyncs before cutting off power */
+ msleep(50);
+
+ tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY);
+
+ regulator_disable(ddata->vcc_reg);
+
+ ddata->powered_on = 0;
+}
+
+static int tpo_td043_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int tpo_td043_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ /*
+ * If we are resuming from system suspend, SPI clocks might not be
+ * enabled yet, so we'll program the LCD from SPI PM resume callback.
+ */
+ if (!ddata->spi_suspended) {
+ r = tpo_td043_power_on(ddata);
+ if (r) {
+ in->ops.dpi->disable(in);
+ return r;
+ }
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void tpo_td043_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ in->ops.dpi->disable(in);
+
+ if (!ddata->spi_suspended)
+ tpo_td043_power_off(ddata);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver tpo_td043_ops = {
+ .connect = tpo_td043_connect,
+ .disconnect = tpo_td043_disconnect,
+
+ .enable = tpo_td043_enable,
+ .disable = tpo_td043_disable,
+
+ .set_timings = tpo_td043_set_timings,
+ .get_timings = tpo_td043_get_timings,
+ .check_timings = tpo_td043_check_timings,
+
+ .set_mirror = tpo_td043_set_hmirror,
+ .get_mirror = tpo_td043_get_hmirror,
+
+ .get_resolution = omapdss_default_get_resolution,
+};
+
+static int tpo_td043_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_tpo_td043mtea1_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ ddata->nreset_gpio = pdata->nreset_gpio;
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int tpo_td043_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_0;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup failed: %d\n", r);
+ return r;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi = spi;
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = tpo_td043_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->mode = TPO_R02_MODE_800x480;
+ memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
+
+ ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(ddata->vcc_reg)) {
+ dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
+ r = PTR_ERR(ddata->vcc_reg);
+ goto err_regulator;
+ }
+
+ if (gpio_is_valid(ddata->nreset_gpio)) {
+ r = devm_gpio_request_one(&spi->dev,
+ ddata->nreset_gpio, GPIOF_OUT_INIT_LOW,
+ "lcd reset");
+ if (r < 0) {
+ dev_err(&spi->dev, "couldn't request reset GPIO\n");
+ goto err_gpio_req;
+ }
+ }
+
+ r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
+ if (r) {
+ dev_err(&spi->dev, "failed to create sysfs files\n");
+ goto err_sysfs;
+ }
+
+ ddata->videomode = tpo_td043_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &tpo_td043_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
+err_sysfs:
+err_gpio_req:
+err_regulator:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int tpo_td043_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ omapdss_unregister_display(dssdev);
+
+ tpo_td043_disable(dssdev);
+ tpo_td043_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
+
+ return 0;
+}
+
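+/*
+ * The panel is programmed over SPI, so its power state follows the SPI
+ * device's PM callbacks: remember whether the panel was powered at
+ * suspend time and restore that state on resume.
+ */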
+#ifdef CONFIG_PM_SLEEP
+static int tpo_td043_spi_suspend(struct device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata);
+
+ ddata->power_on_resume = ddata->powered_on;
+ tpo_td043_power_off(ddata);
+ ddata->spi_suspended = 1;
+
+ return 0;
+}
+
+static int tpo_td043_spi_resume(struct device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(dev, "tpo_td043_spi_resume\n");
+
+ if (ddata->power_on_resume) {
+ ret = tpo_td043_power_on(ddata);
+ if (ret)
+ return ret;
+ }
+ ddata->spi_suspended = 0;
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
+ tpo_td043_spi_suspend, tpo_td043_spi_resume);
+
+static struct spi_driver tpo_td043_spi_driver = {
+ .driver = {
+ .name = "panel-tpo-td043mtea1",
+ .owner = THIS_MODULE,
+ .pm = &tpo_td043_spi_pm,
+ },
+ .probe = tpo_td043_probe,
+ .remove = tpo_td043_remove,
+};
+
+module_spi_driver(tpo_td043_spi_driver);
+
+MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
+MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
+MODULE_LICENSE("GPL");
-menu "OMAP2/3 Display Device Drivers"
+menu "OMAP2/3 Display Device Drivers (old device model)"
depends on OMAP2_DSS
config PANEL_GENERIC_DPI
int max_brightness, brightness;
struct backlight_properties props;
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
if (!panel_data)
return -EINVAL;
dssdev->panel.timings = acx_panel_timings;
if (gpio_is_valid(panel_data->reset_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, panel_data->reset_gpio,
+ r = devm_gpio_request_one(dssdev->dev, panel_data->reset_gpio,
GPIOF_OUT_INIT_LOW, "lcd reset");
if (r)
return r;
r = panel_detect(md);
if (r) {
- dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
+ dev_err(dssdev->dev, "%s panel detect error\n", __func__);
if (!md->enabled && gpio_is_valid(panel_data->reset_gpio))
gpio_set_value(panel_data->reset_gpio, 0);
{
struct acx565akm_device *md = &acx_dev;
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
backlight_device_unregister(md->bl_dev);
mutex_lock(&acx_dev.mutex);
struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
int r;
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
struct acx565akm_device *md = &acx_dev;
struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
{
int r;
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
r = acx_panel_power_on(dssdev);
if (r)
static void acx_panel_disable(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dev_dbg(dssdev->dev, "%s\n", __func__);
acx_panel_power_off(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
{
int r, i;
struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
struct panel_config *panel_config = drv_data->panel_config;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
struct panel_config *panel_config = drv_data->panel_config;
int i;
struct panel_drv_data *drv_data = NULL;
int i, r;
- dev_dbg(&dssdev->dev, "probe\n");
+ dev_dbg(dssdev->dev, "probe\n");
if (!panel_data || !panel_data->name)
return -EINVAL;
return -EINVAL;
for (i = 0; i < panel_data->num_gpios; ++i) {
- r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+ r = devm_gpio_request_one(dssdev->dev, panel_data->gpios[i],
panel_data->gpio_invert[i] ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
"panel gpio");
dssdev->panel.timings = panel_config->timings;
- drv_data = devm_kzalloc(&dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ drv_data = devm_kzalloc(dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
mutex_init(&drv_data->lock);
- dev_set_drvdata(&dssdev->dev, drv_data);
+ dev_set_drvdata(dssdev->dev, drv_data);
return 0;
}
static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "remove\n");
+ dev_dbg(dssdev->dev, "remove\n");
- dev_set_drvdata(&dssdev->dev, NULL);
+ dev_set_drvdata(dssdev->dev, NULL);
}
static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&drv_data->lock);
static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
mutex_lock(&drv_data->lock);
static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
mutex_lock(&drv_data->lock);
static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
mutex_lock(&drv_data->lock);
static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&drv_data->lock);
dssdev->panel.timings = lb035q02_timings;
- ld = devm_kzalloc(&dssdev->dev, sizeof(*ld), GFP_KERNEL);
+ ld = devm_kzalloc(dssdev->dev, sizeof(*ld), GFP_KERNEL);
if (!ld)
return -ENOMEM;
for (i = 0; i < panel_data->num_gpios; ++i) {
- r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+ r = devm_gpio_request_one(dssdev->dev, panel_data->gpios[i],
panel_data->gpio_invert[i] ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
"panel gpio");
}
mutex_init(&ld->lock);
- dev_set_drvdata(&dssdev->dev, ld);
+ dev_set_drvdata(dssdev->dev, ld);
return 0;
}
static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ struct lb035q02_data *ld = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&ld->lock);
static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ struct lb035q02_data *ld = dev_get_drvdata(dssdev->dev);
mutex_lock(&ld->lock);
switch (rev & 0xfc) {
case 0x9c:
ddata->blizzard_ver = BLIZZARD_VERSION_S1D13744;
- dev_info(&dssdev->dev, "s1d13744 LCD controller rev %d "
+ dev_info(dssdev->dev, "s1d13744 LCD controller rev %d "
"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
break;
case 0xa4:
ddata->blizzard_ver = BLIZZARD_VERSION_S1D13745;
- dev_info(&dssdev->dev, "s1d13745 LCD controller rev %d "
+ dev_info(dssdev->dev, "s1d13745 LCD controller rev %d "
"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
break;
default:
- dev_err(&dssdev->dev, "invalid s1d1374x revision %02x\n", rev);
+ dev_err(dssdev->dev, "invalid s1d1374x revision %02x\n", rev);
r = -ENODEV;
goto err_inv_chip;
}
panel_name = "ls041y3";
break;
default:
- dev_err(&dssdev->dev, "invalid display ID 0x%x\n",
+ dev_err(dssdev->dev, "invalid display ID 0x%x\n",
display_id[0]);
r = -ENODEV;
goto err_inv_panel;
}
- dev_info(&dssdev->dev, "%s rev %02x LCD detected\n",
+ dev_info(dssdev->dev, "%s rev %02x LCD detected\n",
panel_name, display_id[1]);
send_sleep_out(spi);
struct panel_drv_data *ddata;
int r;
- dev_dbg(&dssdev->dev, "probe\n");
+ dev_dbg(dssdev->dev, "probe\n");
if (!bdata)
return -EINVAL;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
if (gpio_is_valid(bdata->panel_reset)) {
- r = devm_gpio_request_one(&dssdev->dev, bdata->panel_reset,
+ r = devm_gpio_request_one(dssdev->dev, bdata->panel_reset,
GPIOF_OUT_INIT_LOW, "PANEL RESET");
if (r)
return r;
}
if (gpio_is_valid(bdata->ctrl_pwrdown)) {
- r = devm_gpio_request_one(&dssdev->dev, bdata->ctrl_pwrdown,
+ r = devm_gpio_request_one(dssdev->dev, bdata->ctrl_pwrdown,
GPIOF_OUT_INIT_LOW, "PANEL PWRDOWN");
if (r)
return r;
static void n8x0_panel_remove(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "remove\n");
+ dev_dbg(dssdev->dev, "remove\n");
- dev_set_drvdata(&dssdev->dev, NULL);
+ dev_set_drvdata(dssdev->dev, NULL);
}
static int n8x0_panel_enable(struct omap_dss_device *dssdev)
struct panel_drv_data *ddata = get_drv_data(dssdev);
int r;
- dev_dbg(&dssdev->dev, "enable\n");
+ dev_dbg(dssdev->dev, "enable\n");
mutex_lock(&ddata->lock);
{
struct panel_drv_data *ddata = get_drv_data(dssdev);
- dev_dbg(&dssdev->dev, "disable\n");
+ dev_dbg(dssdev->dev, "disable\n");
mutex_lock(&ddata->lock);
struct panel_drv_data *ddata = get_drv_data(dssdev);
u16 dw, dh;
- dev_dbg(&dssdev->dev, "update\n");
+ dev_dbg(dssdev->dev, "update\n");
dw = dssdev->panel.timings.x_res;
dh = dssdev->panel.timings.y_res;
if (x != 0 || y != 0 || w != dw || h != dh) {
- dev_err(&dssdev->dev, "invaid update region %d, %d, %d, %d\n",
+ dev_err(dssdev->dev, "invalid update region %d, %d, %d, %d\n",
x, y, w, h);
return -EINVAL;
}
{
struct panel_drv_data *ddata = get_drv_data(dssdev);
- dev_dbg(&dssdev->dev, "sync\n");
+ dev_dbg(dssdev->dev, "sync\n");
mutex_lock(&ddata->lock);
rfbi_bus_lock();
dssdev->panel.timings = nec_8048_panel_timings;
if (gpio_is_valid(pd->qvga_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->qvga_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->qvga_gpio,
GPIOF_OUT_INIT_HIGH, "lcd QVGA");
if (r)
return r;
}
if (gpio_is_valid(pd->res_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->res_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->res_gpio,
GPIOF_OUT_INIT_LOW, "lcd RES");
if (r)
return r;
static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
{
int r, trial = 100;
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
while (!gpio_get_value(picodlp_pdata->emu_done_gpio)) {
if (!trial--) {
- dev_err(&dssdev->dev, "emu_done signal not"
+ dev_err(dssdev->dev, "emu_done signal not"
" going high\n");
return -ETIMEDOUT;
}
r = omapdss_dpi_display_enable(dssdev);
if (r) {
- dev_err(&dssdev->dev, "failed to enable DPI\n");
+ dev_err(dssdev->dev, "failed to enable DPI\n");
goto err1;
}
if (!picodlp_pdata)
return -EINVAL;
- picod = devm_kzalloc(&dssdev->dev, sizeof(*picod), GFP_KERNEL);
+ picod = devm_kzalloc(dssdev->dev, sizeof(*picod), GFP_KERNEL);
if (!picod)
return -ENOMEM;
adapter = i2c_get_adapter(picodlp_adapter_id);
if (!adapter) {
- dev_err(&dssdev->dev, "can't get i2c adapter\n");
+ dev_err(dssdev->dev, "can't get i2c adapter\n");
return -ENODEV;
}
picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
if (!picodlp_i2c_client) {
- dev_err(&dssdev->dev, "can't add i2c device::"
+ dev_err(dssdev->dev, "can't add i2c device::"
" picodlp_i2c_client is NULL\n");
return -ENODEV;
}
picod->picodlp_i2c_client = picodlp_i2c_client;
- dev_set_drvdata(&dssdev->dev, picod);
+ dev_set_drvdata(dssdev->dev, picod);
if (gpio_is_valid(picodlp_pdata->emu_done_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev,
+ r = devm_gpio_request_one(dssdev->dev,
picodlp_pdata->emu_done_gpio,
GPIOF_IN, "DLP EMU DONE");
if (r)
}
if (gpio_is_valid(picodlp_pdata->pwrgood_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev,
+ r = devm_gpio_request_one(dssdev->dev,
picodlp_pdata->pwrgood_gpio,
GPIOF_OUT_INIT_LOW, "DLP PWRGOOD");
if (r)
static void picodlp_panel_remove(struct omap_dss_device *dssdev)
{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
i2c_unregister_device(picod->picodlp_i2c_client);
- dev_set_drvdata(&dssdev->dev, NULL);
- dev_dbg(&dssdev->dev, "removing picodlp panel\n");
-
- kfree(picod);
+ dev_set_drvdata(dssdev->dev, NULL);
+ dev_dbg(dssdev->dev, "removing picodlp panel\n");
}
static int picodlp_panel_enable(struct omap_dss_device *dssdev)
{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
int r;
- dev_dbg(&dssdev->dev, "enabling picodlp panel\n");
+ dev_dbg(dssdev->dev, "enabling picodlp panel\n");
mutex_lock(&picod->lock);
if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
static void picodlp_panel_disable(struct omap_dss_device *dssdev)
{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
mutex_lock(&picod->lock);
/* Turn off DLP Power */
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
mutex_unlock(&picod->lock);
- dev_dbg(&dssdev->dev, "disabling picodlp panel\n");
+ dev_dbg(dssdev->dev, "disabling picodlp panel\n");
}
static void picodlp_get_resolution(struct omap_dss_device *dssdev,
dssdev->panel.timings = sharp_ls_timings;
if (gpio_is_valid(pd->mo_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->mo_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->mo_gpio,
GPIOF_OUT_INIT_LOW, "lcd MO");
if (r)
return r;
}
if (gpio_is_valid(pd->lr_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->lr_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->lr_gpio,
GPIOF_OUT_INIT_HIGH, "lcd LR");
if (r)
return r;
}
if (gpio_is_valid(pd->ud_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->ud_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->ud_gpio,
GPIOF_OUT_INIT_HIGH, "lcd UD");
if (r)
return r;
}
if (gpio_is_valid(pd->resb_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->resb_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->resb_gpio,
GPIOF_OUT_INIT_LOW, "lcd RESB");
if (r)
return r;
}
if (gpio_is_valid(pd->ini_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, pd->ini_gpio,
+ r = devm_gpio_request_one(dssdev->dev, pd->ini_gpio,
GPIOF_OUT_INIT_LOW, "lcd INI");
if (r)
return r;
static void taal_queue_esd_work(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
if (td->esd_interval > 0)
queue_delayed_work(td->workqueue, &td->esd_work,
static void taal_cancel_esd_work(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
cancel_delayed_work(&td->esd_work);
}
static void taal_queue_ulps_work(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
if (td->ulps_timeout > 0)
queue_delayed_work(td->workqueue, &td->ulps_work,
static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
cancel_delayed_work(&td->ulps_work);
}
static int taal_enter_ulps(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
if (td->ulps_enabled)
return 0;
err:
- dev_err(&dssdev->dev, "enter ULPS failed");
+ dev_err(dssdev->dev, "enter ULPS failed");
taal_panel_reset(dssdev);
td->ulps_enabled = false;
static int taal_exit_ulps(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
if (!td->ulps_enabled)
r = omapdss_dsi_display_enable(dssdev);
if (r) {
- dev_err(&dssdev->dev, "failed to enable DSI\n");
+ dev_err(dssdev->dev, "failed to enable DSI\n");
goto err1;
}
r = _taal_enable_te(dssdev, true);
if (r) {
- dev_err(&dssdev->dev, "failed to re-enable TE");
+ dev_err(dssdev->dev, "failed to re-enable TE");
goto err2;
}
return 0;
err2:
- dev_err(&dssdev->dev, "failed to exit ULPS");
+ dev_err(dssdev->dev, "failed to exit ULPS");
r = taal_panel_reset(dssdev);
if (!r) {
static int taal_wake_up(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
if (td->ulps_enabled)
return taal_exit_ulps(dssdev);
static int taal_bl_update_status(struct backlight_device *dev)
{
struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
int level;
else
level = 0;
- dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
+ dev_dbg(dssdev->dev, "update brightness to %d\n", level);
mutex_lock(&td->lock);
struct device_attribute *attr, char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
u8 errors = 0;
int r;
struct device_attribute *attr, char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
u8 id1, id2, id3;
int r;
char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
const char *mode_str;
int mode;
int len;
const char *buf, size_t count)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int i;
int r;
const char *buf, size_t count)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned long t;
int r;
char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned t;
mutex_lock(&td->lock);
const char *buf, size_t count)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned long t;
int r;
char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned t;
mutex_lock(&td->lock);
const char *buf, size_t count)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned long t;
int r;
char *buf)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
unsigned t;
mutex_lock(&td->lock);
static void taal_hw_reset(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
if (!gpio_is_valid(td->reset_gpio))
return;
struct backlight_device *bldev = NULL;
int r;
- dev_dbg(&dssdev->dev, "probe\n");
+ dev_dbg(dssdev->dev, "probe\n");
- td = devm_kzalloc(&dssdev->dev, sizeof(*td), GFP_KERNEL);
+ td = devm_kzalloc(dssdev->dev, sizeof(*td), GFP_KERNEL);
if (!td)
return -ENOMEM;
- dev_set_drvdata(&dssdev->dev, td);
+ dev_set_drvdata(dssdev->dev, td);
td->dssdev = dssdev;
if (dssdev->data) {
atomic_set(&td->do_update, 0);
if (gpio_is_valid(td->reset_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, td->reset_gpio,
+ r = devm_gpio_request_one(dssdev->dev, td->reset_gpio,
GPIOF_OUT_INIT_LOW, "taal rst");
if (r) {
- dev_err(&dssdev->dev, "failed to request reset gpio\n");
+ dev_err(dssdev->dev, "failed to request reset gpio\n");
return r;
}
}
if (gpio_is_valid(td->ext_te_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, td->ext_te_gpio,
+ r = devm_gpio_request_one(dssdev->dev, td->ext_te_gpio,
GPIOF_IN, "taal irq");
if (r) {
- dev_err(&dssdev->dev, "GPIO request failed\n");
+ dev_err(dssdev->dev, "GPIO request failed\n");
return r;
}
- r = devm_request_irq(&dssdev->dev, gpio_to_irq(td->ext_te_gpio),
+ r = devm_request_irq(dssdev->dev, gpio_to_irq(td->ext_te_gpio),
taal_te_isr,
IRQF_TRIGGER_RISING,
"taal vsync", dssdev);
if (r) {
- dev_err(&dssdev->dev, "IRQ request failed\n");
+ dev_err(dssdev->dev, "IRQ request failed\n");
return r;
}
INIT_DEFERRABLE_WORK(&td->te_timeout_work,
taal_te_timeout_work_callback);
- dev_dbg(&dssdev->dev, "Using GPIO TE\n");
+ dev_dbg(dssdev->dev, "Using GPIO TE\n");
}
td->workqueue = create_singlethread_workqueue("taal_esd");
if (td->workqueue == NULL) {
- dev_err(&dssdev->dev, "can't create ESD workqueue\n");
+ dev_err(dssdev->dev, "can't create ESD workqueue\n");
return -ENOMEM;
}
INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
props.max_brightness = 255;
props.type = BACKLIGHT_RAW;
- bldev = backlight_device_register(dev_name(&dssdev->dev),
- &dssdev->dev, dssdev, &taal_bl_ops, &props);
+ bldev = backlight_device_register(dev_name(dssdev->dev),
+ dssdev->dev, dssdev, &taal_bl_ops, &props);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
goto err_bl;
r = omap_dsi_request_vc(dssdev, &td->channel);
if (r) {
- dev_err(&dssdev->dev, "failed to get virtual channel\n");
+ dev_err(dssdev->dev, "failed to get virtual channel\n");
goto err_req_vc;
}
r = omap_dsi_set_vc_id(dssdev, td->channel, TCH);
if (r) {
- dev_err(&dssdev->dev, "failed to set VC_ID\n");
+ dev_err(dssdev->dev, "failed to set VC_ID\n");
goto err_vc_id;
}
- r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
+ r = sysfs_create_group(&dssdev->dev->kobj, &taal_attr_group);
if (r) {
- dev_err(&dssdev->dev, "failed to create sysfs files\n");
+ dev_err(dssdev->dev, "failed to create sysfs files\n");
goto err_vc_id;
}
static void __exit taal_remove(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
struct backlight_device *bldev;
- dev_dbg(&dssdev->dev, "remove\n");
+ dev_dbg(dssdev->dev, "remove\n");
- sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
+ sysfs_remove_group(&dssdev->dev->kobj, &taal_attr_group);
omap_dsi_release_vc(dssdev, td->channel);
bldev = td->bldev;
static int taal_power_on(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
if (r) {
- dev_err(&dssdev->dev, "failed to configure DSI pins\n");
+ dev_err(dssdev->dev, "failed to configure DSI pins\n");
goto err0;
};
r = omapdss_dsi_set_config(dssdev, &dsi_config);
if (r) {
- dev_err(&dssdev->dev, "failed to configure DSI\n");
+ dev_err(dssdev->dev, "failed to configure DSI\n");
goto err0;
}
r = omapdss_dsi_display_enable(dssdev);
if (r) {
- dev_err(&dssdev->dev, "failed to enable DSI\n");
+ dev_err(dssdev->dev, "failed to enable DSI\n");
goto err0;
}
td->enabled = 1;
if (!td->intro_printed) {
- dev_info(&dssdev->dev, "panel revision %02x.%02x.%02x\n",
+ dev_info(dssdev->dev, "panel revision %02x.%02x.%02x\n",
id1, id2, id3);
if (td->cabc_broken)
- dev_info(&dssdev->dev,
+ dev_info(dssdev->dev,
"old Taal version, CABC disabled\n");
td->intro_printed = true;
}
return 0;
err:
- dev_err(&dssdev->dev, "error while enabling panel, issuing HW reset\n");
+ dev_err(dssdev->dev, "error while enabling panel, issuing HW reset\n");
taal_hw_reset(dssdev);
static void taal_power_off(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
dsi_disable_video_output(dssdev, td->channel);
r = taal_sleep_in(td);
if (r) {
- dev_err(&dssdev->dev,
+ dev_err(dssdev->dev,
"error disabling panel, issuing HW reset\n");
taal_hw_reset(dssdev);
}
static int taal_panel_reset(struct omap_dss_device *dssdev)
{
- dev_err(&dssdev->dev, "performing LCD reset\n");
+ dev_err(dssdev->dev, "performing LCD reset\n");
taal_power_off(dssdev);
taal_hw_reset(dssdev);
static int taal_enable(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
- dev_dbg(&dssdev->dev, "enable\n");
+ dev_dbg(dssdev->dev, "enable\n");
mutex_lock(&td->lock);
return 0;
err:
- dev_dbg(&dssdev->dev, "enable failed\n");
+ dev_dbg(dssdev->dev, "enable failed\n");
mutex_unlock(&td->lock);
return r;
}
static void taal_disable(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
- dev_dbg(&dssdev->dev, "disable\n");
+ dev_dbg(dssdev->dev, "disable\n");
mutex_lock(&td->lock);
static void taal_framedone_cb(int err, void *data)
{
struct omap_dss_device *dssdev = data;
- dev_dbg(&dssdev->dev, "framedone, err %d\n", err);
+ dev_dbg(dssdev->dev, "framedone, err %d\n", err);
dsi_bus_unlock(dssdev);
}
static irqreturn_t taal_te_isr(int irq, void *data)
{
struct omap_dss_device *dssdev = data;
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int old;
int r;
return IRQ_HANDLED;
err:
- dev_err(&dssdev->dev, "start update failed\n");
+ dev_err(dssdev->dev, "start update failed\n");
dsi_bus_unlock(dssdev);
return IRQ_HANDLED;
}
te_timeout_work.work);
struct omap_dss_device *dssdev = td->dssdev;
- dev_err(&dssdev->dev, "TE not received for 250ms!\n");
+ dev_err(dssdev->dev, "TE not received for 250ms!\n");
atomic_set(&td->do_update, 0);
dsi_bus_unlock(dssdev);
static int taal_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
- dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
+ dev_dbg(dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
mutex_lock(&td->lock);
dsi_bus_lock(dssdev);
static int taal_sync(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
- dev_dbg(&dssdev->dev, "sync\n");
+ dev_dbg(dssdev->dev, "sync\n");
mutex_lock(&td->lock);
dsi_bus_lock(dssdev);
dsi_bus_unlock(dssdev);
mutex_unlock(&td->lock);
- dev_dbg(&dssdev->dev, "sync done\n");
+ dev_dbg(dssdev->dev, "sync done\n");
return 0;
}
static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
if (enable)
static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&td->lock);
static int taal_get_te(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&td->lock);
static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
u8 id1, id2, id3;
int r;
int first = 1;
int plen;
unsigned buf_used = 0;
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct taal_data *td = dev_get_drvdata(dssdev->dev);
if (size < w * h * 3)
return -ENOMEM;
buf + buf_used, size - buf_used);
if (r < 0) {
- dev_err(&dssdev->dev, "read error\n");
+ dev_err(dssdev->dev, "read error\n");
goto err3;
}
buf_used += r;
if (r < plen) {
- dev_err(&dssdev->dev, "short read\n");
+ dev_err(dssdev->dev, "short read\n");
break;
}
if (signal_pending(current)) {
- dev_err(&dssdev->dev, "signal pending, "
+ dev_err(dssdev->dev, "signal pending, "
"aborting memory read\n");
r = -ERESTARTSYS;
goto err3;
r = taal_wake_up(dssdev);
if (r) {
- dev_err(&dssdev->dev, "failed to exit ULPS\n");
+ dev_err(dssdev->dev, "failed to exit ULPS\n");
goto err;
}
r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state1);
if (r) {
- dev_err(&dssdev->dev, "failed to read Taal status\n");
+ dev_err(dssdev->dev, "failed to read Taal status\n");
goto err;
}
/* Run self diagnostics */
r = taal_sleep_out(td);
if (r) {
- dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n");
+ dev_err(dssdev->dev, "failed to run Taal self-diagnostics\n");
goto err;
}
r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state2);
if (r) {
- dev_err(&dssdev->dev, "failed to read Taal status\n");
+ dev_err(dssdev->dev, "failed to read Taal status\n");
goto err;
}
* Bit6 if the test passes.
*/
if (!((state1 ^ state2) & (1 << 6))) {
- dev_err(&dssdev->dev, "LCD self diagnostics failed\n");
+ dev_err(dssdev->dev, "LCD self diagnostics failed\n");
goto err;
}
/* Self-diagnostics result is also shown on TE GPIO line. We need
mutex_unlock(&td->lock);
return;
err:
- dev_err(&dssdev->dev, "performing LCD reset\n");
+ dev_err(dssdev->dev, "performing LCD reset\n");
taal_panel_reset(dssdev);
static int tfp410_power_on(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
int r;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
static void tfp410_power_off(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
int r;
int i2c_bus_num;
- ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
+ ddata = devm_kzalloc(dssdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
}
if (gpio_is_valid(ddata->pd_gpio)) {
- r = devm_gpio_request_one(&dssdev->dev, ddata->pd_gpio,
+ r = devm_gpio_request_one(dssdev->dev, ddata->pd_gpio,
GPIOF_OUT_INIT_LOW, "tfp410 pd");
if (r) {
- dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
+ dev_err(dssdev->dev, "Failed to request PD GPIO %d\n",
ddata->pd_gpio);
return r;
}
adapter = i2c_get_adapter(i2c_bus_num);
if (!adapter) {
- dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+ dev_err(dssdev->dev, "Failed to get I2C adapter, bus %d\n",
i2c_bus_num);
return -EPROBE_DEFER;
}
ddata->i2c_adapter = adapter;
}
- dev_set_drvdata(&dssdev->dev, ddata);
+ dev_set_drvdata(dssdev->dev, ddata);
return 0;
}
static void __exit tfp410_remove(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
mutex_lock(&ddata->lock);
if (ddata->i2c_adapter)
i2c_put_adapter(ddata->i2c_adapter);
- dev_set_drvdata(&dssdev->dev, NULL);
+ dev_set_drvdata(dssdev->dev, NULL);
mutex_unlock(&ddata->lock);
}
static int tfp410_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&ddata->lock);
static void tfp410_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
mutex_lock(&ddata->lock);
static void tfp410_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
mutex_lock(&ddata->lock);
omapdss_dpi_set_timings(dssdev, timings);
static void tfp410_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
mutex_lock(&ddata->lock);
*timings = dssdev->panel.timings;
static int tfp410_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
int r;
mutex_lock(&ddata->lock);
static int tfp410_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
int r, l, bytes_read;
mutex_lock(&ddata->lock);
static bool tfp410_detect(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
unsigned char out;
int r;
static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
tpo_td043->hmirror = enable;
return tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror,
static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
return tpo_td043->hmirror;
}
static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
int r;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
static int tpo_td043_enable(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "enable\n");
+ dev_dbg(dssdev->dev, "enable\n");
return tpo_td043_enable_dss(dssdev);
}
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "disable\n");
+ dev_dbg(dssdev->dev, "disable\n");
tpo_td043_disable_dss(dssdev);
struct panel_tpo_td043_data *pdata = get_panel_data(dssdev);
int ret = 0;
- dev_dbg(&dssdev->dev, "probe\n");
+ dev_dbg(dssdev->dev, "probe\n");
if (tpo_td043 == NULL) {
- dev_err(&dssdev->dev, "missing tpo_td043_device\n");
+ dev_err(dssdev->dev, "missing tpo_td043_device\n");
return -ENODEV;
}
tpo_td043->mode = TPO_R02_MODE_800x480;
memcpy(tpo_td043->gamma, tpo_td043_def_gamma, sizeof(tpo_td043->gamma));
- tpo_td043->vcc_reg = regulator_get(&dssdev->dev, "vcc");
+ tpo_td043->vcc_reg = regulator_get(dssdev->dev, "vcc");
if (IS_ERR(tpo_td043->vcc_reg)) {
- dev_err(&dssdev->dev, "failed to get LCD VCC regulator\n");
+ dev_err(dssdev->dev, "failed to get LCD VCC regulator\n");
ret = PTR_ERR(tpo_td043->vcc_reg);
goto fail_regulator;
}
if (gpio_is_valid(tpo_td043->nreset_gpio)) {
- ret = devm_gpio_request_one(&dssdev->dev,
+ ret = devm_gpio_request_one(dssdev->dev,
tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW,
"lcd reset");
if (ret < 0) {
- dev_err(&dssdev->dev, "couldn't request reset GPIO\n");
+ dev_err(dssdev->dev, "couldn't request reset GPIO\n");
goto fail_gpio_req;
}
}
- ret = sysfs_create_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
+ ret = sysfs_create_group(&dssdev->dev->kobj, &tpo_td043_attr_group);
if (ret)
- dev_warn(&dssdev->dev, "failed to create sysfs files\n");
+ dev_warn(dssdev->dev, "failed to create sysfs files\n");
- dev_set_drvdata(&dssdev->dev, tpo_td043);
+ dev_set_drvdata(dssdev->dev, tpo_td043);
return 0;
static void tpo_td043_remove(struct omap_dss_device *dssdev)
{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
- dev_dbg(&dssdev->dev, "remove\n");
+ dev_dbg(dssdev->dev, "remove\n");
- sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
+ sysfs_remove_group(&dssdev->dev->kobj, &tpo_td043_attr_group);
regulator_put(tpo_td043->vcc_reg);
}
menuconfig OMAP2_DSS
tristate "OMAP2+ Display Subsystem support"
+ select VIDEOMODE_HELPERS
help
OMAP2+ Display Subsystem support.
DSSWARN("timeout in wait_pending_extra_info_updates\n");
}
-static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
+static struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
{
- return ovl->manager ?
- (ovl->manager->output ? ovl->manager->output->device : NULL) :
- NULL;
+ struct omap_dss_device *dssdev;
+
+ dssdev = mgr->output;
+ if (dssdev == NULL)
+ return NULL;
+
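+ /* walk the output's device chain to reach the panel at its end */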
+ while (dssdev->device)
+ dssdev = dssdev->device;
+
+ if (dssdev->driver)
+ return dssdev;
+ else
+ return NULL;
}
-static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
+static struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
{
- return mgr->output ? mgr->output->device : NULL;
+ return ovl->manager ? dss_mgr_get_device(ovl->manager) : NULL;
}
static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
}
}
+static int dss_mgr_connect_compat(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ return mgr->set_output(mgr, dst);
+}
+
+static void dss_mgr_disconnect_compat(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ mgr->unset_output(mgr);
+}
+
static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
}
static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
- struct omap_dss_output *output)
+ struct omap_dss_device *output)
{
int r;
}
static const struct dss_mgr_ops apply_mgr_ops = {
+ .connect = dss_mgr_connect_compat,
+ .disconnect = dss_mgr_disconnect_compat,
.start_update = dss_mgr_start_update_compat,
.enable = dss_mgr_enable_compat,
.disable = dss_mgr_disable_compat,
int omapdss_compat_init(void)
{
struct platform_device *pdev = dss_get_core_pdev();
- struct omap_dss_device *dssdev = NULL;
int i, r;
mutex_lock(&compat_init_lock);
apply_init_priv();
- dss_init_overlay_managers(pdev);
+ dss_init_overlay_managers_sysfs(pdev);
dss_init_overlays(pdev);
for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
if (r)
goto err_mgr_ops;
- for_each_dss_dev(dssdev) {
- r = display_init_sysfs(pdev, dssdev);
- /* XXX uninit sysfs files on error */
- if (r)
- goto err_disp_sysfs;
- }
+ r = display_init_sysfs(pdev);
+ if (r)
+ goto err_disp_sysfs;
dispc_runtime_get();
err_init_irq:
dispc_runtime_put();
+ display_uninit_sysfs(pdev);
err_disp_sysfs:
dss_uninstall_mgr_ops();
err_mgr_ops:
- dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlay_managers_sysfs(pdev);
dss_uninit_overlays(pdev);
compat_refcnt--;
void omapdss_compat_uninit(void)
{
struct platform_device *pdev = dss_get_core_pdev();
- struct omap_dss_device *dssdev = NULL;
mutex_lock(&compat_init_lock);
dss_dispc_uninitialize_irq();
- for_each_dss_dev(dssdev)
- display_uninit_sysfs(pdev, dssdev);
+ display_uninit_sysfs(pdev);
dss_uninstall_mgr_ops();
- dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlay_managers_sysfs(pdev);
dss_uninit_overlays(pdev);
out:
mutex_unlock(&compat_init_lock);
module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");
+static bool dss_initialized;
+
const char *omapdss_get_default_display_name(void)
{
return core.default_display_name;
}
EXPORT_SYMBOL(omapdss_get_version);
+bool omapdss_is_initialized(void)
+{
+ return dss_initialized;
+}
+EXPORT_SYMBOL(omapdss_is_initialized);
+
struct platform_device *dss_get_core_pdev(void)
{
return core.pdev;
if (core.vdds_dsi_reg != NULL)
return core.vdds_dsi_reg;
- reg = regulator_get(&core.pdev->dev, "vdds_dsi");
+ reg = devm_regulator_get(&core.pdev->dev, "vdds_dsi");
if (!IS_ERR(reg))
core.vdds_dsi_reg = reg;
if (core.vdds_sdi_reg != NULL)
return core.vdds_sdi_reg;
- reg = regulator_get(&core.pdev->dev, "vdds_sdi");
+ reg = devm_regulator_get(&core.pdev->dev, "vdds_sdi");
if (!IS_ERR(reg))
core.vdds_sdi_reg = reg;
if (def_disp_name)
core.default_display_name = def_disp_name;
+ else if (pdata->default_display_name)
+ core.default_display_name = pdata->default_display_name;
else if (pdata->default_device)
core.default_display_name = pdata->default_device->name;
return strcmp(dssdev->driver_name, driver->name) == 0;
}
-static ssize_t device_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- dssdev->name ?
- dssdev->name : "");
-}
-
-static struct device_attribute default_dev_attrs[] = {
- __ATTR(name, S_IRUGO, device_name_show, NULL),
- __ATTR_NULL,
-};
-
-static ssize_t driver_name_show(struct device_driver *drv, char *buf)
-{
- struct omap_dss_driver *dssdrv = to_dss_driver(drv);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- dssdrv->driver.name ?
- dssdrv->driver.name : "");
-}
-static struct driver_attribute default_drv_attrs[] = {
- __ATTR(name, S_IRUGO, driver_name_show, NULL),
- __ATTR_NULL,
-};
-
static struct bus_type dss_bus_type = {
.name = "omapdss",
.match = dss_bus_match,
- .dev_attrs = default_dev_attrs,
- .drv_attrs = default_drv_attrs,
};
static void dss_bus_release(struct device *dev)
return 0;
}
+static int omapdss_default_connect(struct omap_dss_device *dssdev)
+{
+ struct omap_dss_device *out;
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ out = dssdev->output;
+
+ if (out == NULL)
+ return -ENODEV;
+
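+ /* default connect: bind the output to the overlay manager of its DISPC channel */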
+ mgr = omap_dss_get_overlay_manager(out->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, out);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void omapdss_default_disconnect(struct omap_dss_device *dssdev)
+{
+ struct omap_dss_device *out;
+ struct omap_overlay_manager *mgr;
+
+ out = dssdev->output;
+
+ if (out == NULL)
+ return;
+
+ mgr = out->manager;
+
+ if (mgr == NULL)
+ return;
+
+ dss_mgr_disconnect(mgr, out);
+}
+
int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
{
dssdriver->driver.bus = &dss_bus_type;
omapdss_default_get_recommended_bpp;
if (dssdriver->get_timings == NULL)
dssdriver->get_timings = omapdss_default_get_timings;
+ if (dssdriver->connect == NULL)
+ dssdriver->connect = omapdss_default_connect;
+ if (dssdriver->disconnect == NULL)
+ dssdriver->disconnect = omapdss_default_disconnect;
return driver_register(&dssdriver->driver);
}
if (!dssdev)
return NULL;
- dssdev->dev.bus = &dss_bus_type;
- dssdev->dev.parent = parent;
- dssdev->dev.release = omap_dss_dev_release;
- dev_set_name(&dssdev->dev, "display%d", disp_num_counter++);
+ dssdev->old_dev.bus = &dss_bus_type;
+ dssdev->old_dev.parent = parent;
+ dssdev->old_dev.release = omap_dss_dev_release;
+ dev_set_name(&dssdev->old_dev, "display%d", disp_num_counter++);
- device_initialize(&dssdev->dev);
+ device_initialize(&dssdev->old_dev);
return dssdev;
}
int dss_add_device(struct omap_dss_device *dssdev)
{
- return device_add(&dssdev->dev);
+ dssdev->dev = &dssdev->old_dev;
+
+ omapdss_register_display(dssdev);
+ return device_add(&dssdev->old_dev);
}
void dss_put_device(struct omap_dss_device *dssdev)
{
- put_device(&dssdev->dev);
+ put_device(&dssdev->old_dev);
}
void dss_unregister_device(struct omap_dss_device *dssdev)
{
- device_unregister(&dssdev->dev);
+ device_unregister(&dssdev->old_dev);
+ omapdss_unregister_display(dssdev);
}
static int dss_unregister_dss_dev(struct device *dev, void *data)
return r;
}
+ dss_initialized = true;
+
return 0;
}
static void __exit omap_dss_exit(void)
{
- if (core.vdds_dsi_reg != NULL) {
- regulator_put(core.vdds_dsi_reg);
- core.vdds_dsi_reg = NULL;
- }
-
- if (core.vdds_sdi_reg != NULL) {
- regulator_put(core.vdds_sdi_reg);
- core.vdds_sdi_reg = NULL;
- }
-
omap_dss_unregister_drivers();
omap_dss_bus_unregister();
static int __init omap_dss_init2(void)
{
- return omap_dss_register_drivers();
+ int r;
+
+ r = omap_dss_register_drivers();
+ if (r)
+ return r;
+
+ dss_initialized = true;
+
+ return 0;
}
core_initcall(omap_dss_init);
if (bit & errors) {
DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
ovl->name);
- dispc_ovl_enable(ovl->id, false);
- dispc_mgr_go(ovl->manager->id);
+ ovl->disable(ovl);
msleep(50);
}
}
int irq;
unsigned long core_clk_rate;
+ unsigned long tv_pclk_rate;
u32 fifo_size[DISPC_MAX_NR_FIFOS];
/* maps which plane is using a fifo. fifo-id -> plane-id */
return r / pcd;
} else {
- enum dss_hdmi_venc_clk_source_select source;
-
- source = dss_get_hdmi_venc_clk_source();
-
- switch (source) {
- case DSS_VENC_TV_CLK:
- return venc_get_pixel_clock();
- case DSS_HDMI_M_PCLK:
- return hdmi_get_pixel_clock();
- default:
- BUG();
- return 0;
- }
+ return dispc.tv_pclk_rate;
}
}
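+/* the TV pixel clock is cached here; the TV output driver is expected to set it via dispc_set_tv_pclk() */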
+void dispc_set_tv_pclk(unsigned long pclk)
+{
+ dispc.tv_pclk_rate = pclk;
+}
+
unsigned long dispc_core_clk_rate(void)
{
return dispc.core_clk_rate;
dispc_runtime_put();
+ dss_init_overlay_managers();
+
dss_debugfs_create_file("dispc", dispc_dump_regs);
return 0;
{
pm_runtime_disable(&pdev->dev);
+ dss_uninit_overlay_managers();
+
return 0;
}
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <linux/sysfs.h>
#include <video/omapdss.h>
#include "dss.h"
-#include "dss_features.h"
+
+static struct omap_dss_device *to_dss_device_sysfs(struct device *dev)
+{
+ struct omap_dss_device *dssdev = NULL;
+
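+ /* find the dssdev backing this struct device; drop the iterator's reference before returning */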
+ for_each_dss_dev(dssdev) {
+ if (dssdev->dev == dev) {
+ omap_dss_put_device(dssdev);
+ return dssdev;
+ }
+ }
+
+ return NULL;
+}
+
+static ssize_t display_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ dssdev->name ?
+ dssdev->name : "");
+}
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ omapdss_device_is_enabled(dssdev));
}
static ssize_t display_enabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
- bool enabled;
+ bool enable;
- r = strtobool(buf, &enabled);
+ r = strtobool(buf, &enable);
if (r)
return r;
- if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
- if (enabled) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
- } else {
- dssdev->driver->disable(dssdev);
- }
+ if (enable == omapdss_device_is_enabled(dssdev))
+ return size;
+
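+ /* a display must be connected to an output before it can be enabled or disabled */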
+ if (omapdss_device_is_connected(dssdev) == false)
+ return -ENODEV;
+
+ if (enable) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ return r;
+ } else {
+ dssdev->driver->disable(dssdev);
}
return size;
static ssize_t display_tear_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
dssdev->driver->get_te ?
dssdev->driver->get_te(dssdev) : 0);
static ssize_t display_tear_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
bool te;
static ssize_t display_timings_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
struct omap_video_timings t;
if (!dssdev->driver->get_timings)
static ssize_t display_timings_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
struct omap_video_timings t = dssdev->panel.timings;
int r, found;
static ssize_t display_rotate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int rotate;
if (!dssdev->driver->get_rotate)
return -ENOENT;
static ssize_t display_rotate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int rot, r;
if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
static ssize_t display_mirror_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int mirror;
if (!dssdev->driver->get_mirror)
return -ENOENT;
static ssize_t display_mirror_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
bool mirror;
static ssize_t display_wss_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
unsigned int wss;
if (!dssdev->driver->get_wss)
static ssize_t display_wss_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
u32 wss;
int r;
return size;
}
+static DEVICE_ATTR(name, S_IRUGO, display_name_show, NULL);
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
display_wss_show, display_wss_store);
-static struct device_attribute *display_sysfs_attrs[] = {
- &dev_attr_enabled,
- &dev_attr_tear_elim,
- &dev_attr_timings,
- &dev_attr_rotate,
- &dev_attr_mirror,
- &dev_attr_wss,
+static const struct attribute *display_sysfs_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_tear_elim.attr,
+ &dev_attr_timings.attr,
+ &dev_attr_rotate.attr,
+ &dev_attr_mirror.attr,
+ &dev_attr_wss.attr,
NULL
};
-int display_init_sysfs(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
+int display_init_sysfs(struct platform_device *pdev)
{
- struct device_attribute *attr;
- int i, r;
+ struct omap_dss_device *dssdev = NULL;
+ int r;
- /* create device sysfs files */
- i = 0;
- while ((attr = display_sysfs_attrs[i++]) != NULL) {
- r = device_create_file(&dssdev->dev, attr);
- if (r) {
- for (i = i - 2; i >= 0; i--) {
- attr = display_sysfs_attrs[i];
- device_remove_file(&dssdev->dev, attr);
- }
+ for_each_dss_dev(dssdev) {
+ struct kobject *kobj = &dssdev->dev->kobj;
- DSSERR("failed to create sysfs file\n");
- return r;
+ r = sysfs_create_files(kobj, display_sysfs_attrs);
+ if (r) {
+ DSSERR("failed to create sysfs files\n");
+ goto err;
}
- }
- /* create display? sysfs links */
- r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
- dev_name(&dssdev->dev));
- if (r) {
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
+ r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
+ if (r) {
+ sysfs_remove_files(kobj, display_sysfs_attrs);
- DSSERR("failed to create sysfs display link\n");
- return r;
+ DSSERR("failed to create sysfs display link\n");
+ goto err;
+ }
}
return 0;
+
+err:
+ display_uninit_sysfs(pdev);
+
+ return r;
}
-void display_uninit_sysfs(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
+void display_uninit_sysfs(struct platform_device *pdev)
{
- struct device_attribute *attr;
- int i = 0;
-
- sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
+ struct omap_dss_device *dssdev = NULL;
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
+ for_each_dss_dev(dssdev) {
+ sysfs_remove_link(&pdev->dev.kobj, dssdev->alias);
+ sysfs_remove_files(&dssdev->dev->kobj,
+ display_sysfs_attrs);
+ }
}
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
case OMAP_DISPLAY_TYPE_HDMI:
+ case OMAP_DISPLAY_TYPE_DVI:
return 24;
default:
BUG();
}
EXPORT_SYMBOL(omapdss_default_get_timings);
-static int dss_suspend_device(struct device *dev, void *data)
+int dss_suspend_all_devices(void)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- dssdev->activate_after_resume = false;
- return 0;
- }
-
- dssdev->driver->disable(dssdev);
-
- dssdev->activate_after_resume = true;
+ struct omap_dss_device *dssdev = NULL;
- return 0;
-}
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
-int dss_suspend_all_devices(void)
-{
- int r;
- struct bus_type *bus = dss_get_bus();
-
- r = bus_for_each_dev(bus, NULL, NULL, dss_suspend_device);
- if (r) {
- /* resume all displays that were suspended */
- dss_resume_all_devices();
- return r;
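+ /* remember which displays were active so dss_resume_all_devices() can re-enable them */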
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ dssdev->driver->disable(dssdev);
+ dssdev->activate_after_resume = true;
+ } else {
+ dssdev->activate_after_resume = false;
+ }
}
return 0;
}
-static int dss_resume_device(struct device *dev, void *data)
+int dss_resume_all_devices(void)
{
- int r;
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_device *dssdev = NULL;
- if (dssdev->activate_after_resume) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
- }
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
- dssdev->activate_after_resume = false;
+ if (dssdev->activate_after_resume) {
+ dssdev->driver->enable(dssdev);
+ dssdev->activate_after_resume = false;
+ }
+ }
return 0;
}
-int dss_resume_all_devices(void)
+void dss_disable_all_devices(void)
{
- struct bus_type *bus = dss_get_bus();
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
- return bus_for_each_dev(bus, NULL, NULL, dss_resume_device);
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ dssdev->driver->disable(dssdev);
+ }
}
-static int dss_disable_device(struct device *dev, void *data)
+static LIST_HEAD(panel_list);
+static DEFINE_MUTEX(panel_list_mutex);
+static int disp_num_counter;
+
+int omapdss_register_display(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_driver *drv = dssdev->driver;
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
- dssdev->driver->disable(dssdev);
+ snprintf(dssdev->alias, sizeof(dssdev->alias),
+ "display%d", disp_num_counter++);
+ if (drv && drv->get_resolution == NULL)
+ drv->get_resolution = omapdss_default_get_resolution;
+ if (drv && drv->get_recommended_bpp == NULL)
+ drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
+ if (drv && drv->get_timings == NULL)
+ drv->get_timings = omapdss_default_get_timings;
+
+ mutex_lock(&panel_list_mutex);
+ list_add_tail(&dssdev->panel_list, &panel_list);
+ mutex_unlock(&panel_list_mutex);
return 0;
}
+EXPORT_SYMBOL(omapdss_register_display);
-void dss_disable_all_devices(void)
+void omapdss_unregister_display(struct omap_dss_device *dssdev)
{
- struct bus_type *bus = dss_get_bus();
- bus_for_each_dev(bus, NULL, NULL, dss_disable_device);
+ mutex_lock(&panel_list_mutex);
+ list_del(&dssdev->panel_list);
+ mutex_unlock(&panel_list_mutex);
}
+EXPORT_SYMBOL(omapdss_unregister_display);
-
-void omap_dss_get_device(struct omap_dss_device *dssdev)
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev)
{
- get_device(&dssdev->dev);
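+ /* take a reference on both the owning module and the underlying device */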
+ if (!try_module_get(dssdev->owner))
+ return NULL;
+
+ if (get_device(dssdev->dev) == NULL) {
+ module_put(dssdev->owner);
+ return NULL;
+ }
+
+ return dssdev;
}
EXPORT_SYMBOL(omap_dss_get_device);
void omap_dss_put_device(struct omap_dss_device *dssdev)
{
- put_device(&dssdev->dev);
+ put_device(dssdev->dev);
+ module_put(dssdev->owner);
}
EXPORT_SYMBOL(omap_dss_put_device);
-/* ref count of the found device is incremented. ref count
- * of from-device is decremented. */
+/*
+ * ref count of the found device is incremented.
+ * ref count of from-device is decremented.
+ */
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
{
- struct device *dev;
- struct device *dev_start = NULL;
- struct omap_dss_device *dssdev = NULL;
+ struct list_head *l;
+ struct omap_dss_device *dssdev;
+
+ mutex_lock(&panel_list_mutex);
- int match(struct device *dev, void *data)
- {
- return 1;
+ if (list_empty(&panel_list)) {
+ dssdev = NULL;
+ goto out;
}
- if (from)
- dev_start = &from->dev;
- dev = bus_find_device(dss_get_bus(), dev_start, NULL, match);
- if (dev)
- dssdev = to_dss_device(dev);
- if (from)
- put_device(&from->dev);
+ if (from == NULL) {
+ dssdev = list_first_entry(&panel_list, struct omap_dss_device,
+ panel_list);
+ omap_dss_get_device(dssdev);
+ goto out;
+ }
+
+ omap_dss_put_device(from);
+
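+ /* locate 'from' in the panel list and step to the following entry, if any */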
+ list_for_each(l, &panel_list) {
+ dssdev = list_entry(l, struct omap_dss_device, panel_list);
+ if (dssdev == from) {
+ if (list_is_last(l, &panel_list)) {
+ dssdev = NULL;
+ goto out;
+ }
+
+ dssdev = list_entry(l->next, struct omap_dss_device,
+ panel_list);
+ omap_dss_get_device(dssdev);
+ goto out;
+ }
+ }
+ WARN(1, "'from' dssdev not found\n");
+
+ dssdev = NULL;
+out:
+ mutex_unlock(&panel_list_mutex);
return dssdev;
}
EXPORT_SYMBOL(omap_dss_get_next_device);
}
EXPORT_SYMBOL(omap_dss_find_device);
-int omap_dss_start_device(struct omap_dss_device *dssdev)
+void videomode_to_omap_video_timings(const struct videomode *vm,
+ struct omap_video_timings *ovt)
{
- if (!dssdev->driver) {
- DSSDBG("no driver\n");
- return -ENODEV;
- }
-
- if (!try_module_get(dssdev->dev.driver->owner)) {
- return -ENODEV;
- }
-
- return 0;
+ memset(ovt, 0, sizeof(*ovt));
+
+ ovt->pixel_clock = vm->pixelclock / 1000;
+ ovt->x_res = vm->hactive;
+ ovt->hbp = vm->hback_porch;
+ ovt->hfp = vm->hfront_porch;
+ ovt->hsw = vm->hsync_len;
+ ovt->y_res = vm->vactive;
+ ovt->vbp = vm->vback_porch;
+ ovt->vfp = vm->vfront_porch;
+ ovt->vsw = vm->vsync_len;
+
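+ /* translate generic videomode flags into omapdss signal polarities and edges */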
+ ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
+ OMAPDSS_DRIVE_SIG_RISING_EDGE :
+ OMAPDSS_DRIVE_SIG_FALLING_EDGE;
+
+ ovt->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
}
-EXPORT_SYMBOL(omap_dss_start_device);
+EXPORT_SYMBOL(videomode_to_omap_video_timings);
-void omap_dss_stop_device(struct omap_dss_device *dssdev)
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+ struct videomode *vm)
{
- module_put(dssdev->dev.driver->owner);
+ memset(vm, 0, sizeof(*vm));
+
+ vm->pixelclock = ovt->pixel_clock * 1000;
+
+ vm->hactive = ovt->x_res;
+ vm->hback_porch = ovt->hbp;
+ vm->hfront_porch = ovt->hfp;
+ vm->hsync_len = ovt->hsw;
+ vm->vactive = ovt->y_res;
+ vm->vback_porch = ovt->vbp;
+ vm->vfront_porch = ovt->vfp;
+ vm->vsync_len = ovt->vsw;
+
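+ /* map omapdss signal polarities back to generic display flags */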
+ if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ else
+ vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
+
+ if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ else
+ vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH;
+ else
+ vm->flags |= DISPLAY_FLAGS_DE_LOW;
+
+ if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
}
-EXPORT_SYMBOL(omap_dss_stop_device);
-
+EXPORT_SYMBOL(omap_video_timings_to_videomode);
#include "dss_features.h"
static struct {
+ struct platform_device *pdev;
+
struct regulator *vdds_dsi_reg;
struct platform_device *dsidev;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
- struct omap_dss_output output;
+ struct omap_dss_device output;
} dpi;
static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = &dpi.output;
+ struct omap_dss_device *out = &dpi.output;
int r;
mutex_lock(&dpi.lock);
goto err_no_out_mgr;
}
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err_start_dev;
- }
-
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
r = regulator_enable(dpi.vdds_dsi_reg);
if (r)
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
regulator_disable(dpi.vdds_dsi_reg);
err_reg_enable:
- omap_dss_stop_device(dssdev);
-err_start_dev:
err_no_out_mgr:
err_no_reg:
mutex_unlock(&dpi.lock);
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
regulator_disable(dpi.vdds_dsi_reg);
- omap_dss_stop_device(dssdev);
-
mutex_unlock(&dpi.lock);
}
EXPORT_SYMBOL(omapdss_dpi_display_disable);
}
EXPORT_SYMBOL(omapdss_dpi_set_timings);
+static void dpi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ mutex_lock(&dpi.lock);
+
+ *timings = dpi.timings;
+
+ mutex_unlock(&dpi.lock);
+}
+
int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
return 0;
}
+static int dpi_init_regulator(void)
+{
+ struct regulator *vdds_dsi;
+
+ if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ return 0;
+
+ if (dpi.vdds_dsi_reg)
+ return 0;
+
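+ /* reuse the regulator already cached by the DSS core if available, otherwise request it here */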
+ vdds_dsi = dss_get_vdds_dsi();
+
+ if (IS_ERR(vdds_dsi)) {
+ vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+ }
+
+ dpi.vdds_dsi_reg = vdds_dsi;
+
+ return 0;
+}
+
+static void dpi_init_pll(void)
+{
+ struct platform_device *dsidev;
+
+ if (dpi.dsidev)
+ return;
+
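+ /* DPI may derive its pixel clock from a DSI PLL; cache the DSI device only if its PLL is usable */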
+ dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
+ if (!dsidev)
+ return;
+
+ if (dpi_verify_dsi_pll(dsidev)) {
+ DSSWARN("DSI PLL not operational\n");
+ return;
+ }
+
+ dpi.dsidev = dsidev;
+}
+
/*
* Return a hardcoded channel for the DPI output. This should work for
* current use cases, but this can be later expanded to either resolve
}
}
-static int dpi_init_display(struct omap_dss_device *dssdev)
-{
- struct platform_device *dsidev;
-
- DSSDBG("init_display\n");
-
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) &&
- dpi.vdds_dsi_reg == NULL) {
- struct regulator *vdds_dsi;
-
- vdds_dsi = dss_get_vdds_dsi();
-
- if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dpi.vdds_dsi_reg = vdds_dsi;
- }
-
- dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
-
- if (dsidev && dpi_verify_dsi_pll(dsidev)) {
- dsidev = NULL;
- DSSWARN("DSI PLL not operational\n");
- }
-
- if (dsidev)
- DSSDBG("using DSI PLL for DPI clock\n");
-
- dpi.dsidev = dsidev;
-
- return 0;
-}
-
static struct omap_dss_device *dpi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
if (!plat_dssdev)
return 0;
+ r = dpi_init_regulator();
+ if (r)
+ return r;
+
+ dpi_init_pll();
+
dssdev = dss_alloc_and_init_device(&dpidev->dev);
if (!dssdev)
return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
- r = dpi_init_display(dssdev);
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
r = omapdss_output_set_device(&dpi.output, dssdev);
if (r) {
DSSERR("failed to connect output to new device: %s\n",
return 0;
}
+static int dpi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
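+ /* prepare the regulator and PLL, then bind the manager and the panel to this output */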
+ r = dpi_init_regulator();
+ if (r)
+ return r;
+
+ dpi_init_pll();
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void dpi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static const struct omapdss_dpi_ops dpi_ops = {
+ .connect = dpi_connect,
+ .disconnect = dpi_disconnect,
+
+ .enable = omapdss_dpi_display_enable,
+ .disable = omapdss_dpi_display_disable,
+
+ .check_timings = dpi_check_timings,
+ .set_timings = omapdss_dpi_set_timings,
+ .get_timings = dpi_get_timings,
+
+ .set_data_lines = omapdss_dpi_set_data_lines,
+};
+
static void dpi_init_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &dpi.output;
+ struct omap_dss_device *out = &dpi.output;
- out->pdev = pdev;
+ out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
- out->type = OMAP_DISPLAY_TYPE_DPI;
+ out->output_type = OMAP_DISPLAY_TYPE_DPI;
out->name = "dpi.0";
out->dispc_channel = dpi_get_channel();
+ out->ops.dpi = &dpi_ops;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void __exit dpi_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &dpi.output;
+ struct omap_dss_device *out = &dpi.output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
static int omap_dpi_probe(struct platform_device *pdev)
{
int r;
+ dpi.pdev = pdev;
+
mutex_init(&dpi.lock);
dpi_init_output(pdev);
- r = dpi_probe_pdata(pdev);
- if (r) {
- dpi_uninit_output(pdev);
- return r;
+ if (pdev->dev.platform_data) {
+ r = dpi_probe_pdata(pdev);
+ if (r)
+ goto err_probe;
}
return 0;
+
+err_probe:
+ dpi_uninit_output(pdev);
+ return r;
}
static int __exit omap_dpi_remove(struct platform_device *pdev)
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
- struct omap_dss_output output;
+ struct omap_dss_device output;
};
struct dsi_packet_sent_handler_data {
static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
- return dssdev->output->pdev;
+ /* HACK: dssdev can be either the panel device, when using old API, or
+ * the dsi device itself, when using the new API. So we solve this for
+ * now by checking the dssdev->id. This will be removed when the old API
+ * is removed.
+ */
+ if (dssdev->id == OMAP_DSS_OUTPUT_DSI1 ||
+ dssdev->id == OMAP_DSS_OUTPUT_DSI2)
+ return to_platform_device(dssdev->dev);
+
+ return to_platform_device(dssdev->output->dev);
}
struct platform_device *dsi_get_dsidev_from_id(int module)
{
- struct omap_dss_output *out;
+ struct omap_dss_device *out;
enum omap_dss_output_id id;
switch (module) {
out = omap_dss_get_output(id);
- return out ? out->pdev : NULL;
+ return out ? to_platform_device(out->dev) : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
WARN_ON(r < 0 && r != -ENOSYS);
}
+static int dsi_regulator_init(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct regulator *vdds_dsi;
+
+ if (dsi->vdds_dsi_reg != NULL)
+ return 0;
+
+ vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdds_dsi");
+
+ /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(vdds_dsi))
+ vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "VCXIO");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi->vdds_dsi_reg = vdds_dsi;
+
+ return 0;
+}
+
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
bool enable)
*/
enable_hsclk = enable_hsdiv = true;
- if (dsi->vdds_dsi_reg == NULL) {
- struct regulator *vdds_dsi;
-
- vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
-
- /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
- if (IS_ERR(vdds_dsi))
- vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
-
- if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dsi->vdds_dsi_reg = vdds_dsi;
- }
+ r = dsi_regulator_init(dsidev);
+ if (r)
+ return r;
dsi_enable_pll_clock(dsidev, 1);
/*
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_overlay_manager *mgr = dsi->output.manager;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_dss_output *out = &dsi->output;
+ struct omap_dss_device *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
mutex_lock(&dsi->lock);
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err_start_dev;
- }
-
r = dsi_runtime_get(dsidev);
if (r)
goto err_get_dsi;
dsi_enable_pll_clock(dsidev, 0);
dsi_runtime_put(dsidev);
err_get_dsi:
- omap_dss_stop_device(dssdev);
-err_start_dev:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
- omap_dss_stop_device(dssdev);
-
mutex_unlock(&dsi->lock);
}
EXPORT_SYMBOL(omapdss_dsi_display_disable);
}
}
-static int dsi_init_display(struct omap_dss_device *dssdev)
-{
- struct platform_device *dsidev =
- dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- DSSDBG("DSI init\n");
-
- if (dsi->vdds_dsi_reg == NULL) {
- struct regulator *vdds_dsi;
-
- vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
-
- /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
- if (IS_ERR(vdds_dsi))
- vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
-
- if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dsi->vdds_dsi_reg = vdds_dsi;
- }
-
- return 0;
-}
-
int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
if (!plat_dssdev)
return 0;
+ r = dsi_regulator_init(dsidev);
+ if (r)
+ return r;
+
dssdev = dss_alloc_and_init_device(&dsidev->dev);
if (!dssdev)
return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
- r = dsi_init_display(dssdev);
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
r = omapdss_output_set_device(&dsi->output, dssdev);
if (r) {
DSSERR("failed to connect output to new device: %s\n",
return 0;
}
+static int dsi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ r = dsi_regulator_init(dsidev);
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void dsi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
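+/* ops exposed to panel drivers through the output device's ops.dsi pointer */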
+static const struct omapdss_dsi_ops dsi_ops = {
+ .connect = dsi_connect,
+ .disconnect = dsi_disconnect,
+
+ .bus_lock = dsi_bus_lock,
+ .bus_unlock = dsi_bus_unlock,
+
+ .enable = omapdss_dsi_display_enable,
+ .disable = omapdss_dsi_display_disable,
+
+ .enable_hs = omapdss_dsi_vc_enable_hs,
+
+ .configure_pins = omapdss_dsi_configure_pins,
+ .set_config = omapdss_dsi_set_config,
+
+ .enable_video_output = dsi_enable_video_output,
+ .disable_video_output = dsi_disable_video_output,
+
+ .update = omap_dsi_update,
+
+ .enable_te = omapdss_dsi_enable_te,
+
+ .request_vc = omap_dsi_request_vc,
+ .set_vc_id = omap_dsi_set_vc_id,
+ .release_vc = omap_dsi_release_vc,
+
+ .dcs_write = dsi_vc_dcs_write,
+ .dcs_write_nosync = dsi_vc_dcs_write_nosync,
+ .dcs_read = dsi_vc_dcs_read,
+
+ .gen_write = dsi_vc_generic_write,
+ .gen_write_nosync = dsi_vc_generic_write_nosync,
+ .gen_read = dsi_vc_generic_read,
+
+ .bta_sync = dsi_vc_send_bta_sync,
+
+ .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
+};
+
static void dsi_init_output(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_output *out = &dsi->output;
+ struct omap_dss_device *out = &dsi->output;
- out->pdev = dsidev;
+ out->dev = &dsidev->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
- out->type = OMAP_DISPLAY_TYPE_DSI;
+ out->output_type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
out->dispc_channel = dsi_get_channel(dsi->module_id);
+ out->ops.dsi = &dsi_ops;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void dsi_uninit_output(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_output *out = &dsi->output;
+ struct omap_dss_device *out = &dsi->output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
/* DSI1 HW IP initialisation */
dsi_init_output(dsidev);
- r = dsi_probe_pdata(dsidev);
- if (r) {
- dsi_runtime_put(dsidev);
- dsi_uninit_output(dsidev);
- pm_runtime_disable(&dsidev->dev);
- return r;
+ if (dsidev->dev.platform_data) {
+ r = dsi_probe_pdata(dsidev);
+ if (r)
+ goto err_probe;
}
dsi_runtime_put(dsidev);
#endif
return 0;
+err_probe:
+ dsi_runtime_put(dsidev);
+ dsi_uninit_output(dsidev);
err_runtime_get:
pm_runtime_disable(&dsidev->dev);
return r;
pm_runtime_disable(&dsidev->dev);
- if (dsi->vdds_dsi_reg != NULL) {
- if (dsi->vdds_dsi_enabled) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
-
- regulator_put(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_reg = NULL;
+ if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
}
return 0;
int dss_get_ctx_loss_count(void)
{
- struct omap_dss_board_info *board_data = dss.pdev->dev.platform_data;
+ struct platform_device *core_pdev = dss_get_core_pdev();
+ struct omap_dss_board_info *board_data = core_pdev->dev.platform_data;
int cnt;
if (!board_data->get_context_loss_count)
void dss_copy_device_pdata(struct omap_dss_device *dst,
const struct omap_dss_device *src);
-/* output */
-void dss_register_output(struct omap_dss_output *out);
-void dss_unregister_output(struct omap_dss_output *out);
-
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
void dss_disable_all_devices(void);
-int display_init_sysfs(struct platform_device *pdev,
- struct omap_dss_device *dssdev);
-void display_uninit_sysfs(struct platform_device *pdev,
- struct omap_dss_device *dssdev);
+int display_init_sysfs(struct platform_device *pdev);
+void display_uninit_sysfs(struct platform_device *pdev);
/* manager */
-int dss_init_overlay_managers(struct platform_device *pdev);
-void dss_uninit_overlay_managers(struct platform_device *pdev);
+int dss_init_overlay_managers(void);
+void dss_uninit_overlay_managers(void);
+int dss_init_overlay_managers_sysfs(struct platform_device *pdev);
+void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev);
int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
const struct omap_overlay_manager_info *info);
int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
const struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
+void dispc_set_tv_pclk(unsigned long pclk);
u32 dispc_wb_get_framedone_irq(void);
bool dispc_wb_go_busy(void);
bool mem_to_mem, const struct omap_video_timings *timings);
/* VENC */
-#ifdef CONFIG_OMAP2_DSS_VENC
int venc_init_platform_driver(void) __init;
void venc_uninit_platform_driver(void) __exit;
-unsigned long venc_get_pixel_clock(void);
-#else
-static inline unsigned long venc_get_pixel_clock(void)
-{
- WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
- return 0;
-}
-#endif
int omapdss_venc_display_enable(struct omap_dss_device *dssdev);
void omapdss_venc_display_disable(struct omap_dss_device *dssdev);
void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
void venc_panel_exit(void);
/* HDMI */
-#ifdef CONFIG_OMAP4_DSS_HDMI
int hdmi_init_platform_driver(void) __init;
void hdmi_uninit_platform_driver(void) __exit;
-unsigned long hdmi_get_pixel_clock(void);
-#else
-static inline unsigned long hdmi_get_pixel_clock(void)
-{
- WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
- return 0;
-}
-#endif
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev);
.phy_enable = ti_hdmi_4xxx_phy_enable,
.phy_disable = ti_hdmi_4xxx_phy_disable,
.read_edid = ti_hdmi_4xxx_read_edid,
- .detect = ti_hdmi_4xxx_detect,
.pll_enable = ti_hdmi_4xxx_pll_enable,
.pll_disable = ti_hdmi_4xxx_pll_disable,
.video_enable = ti_hdmi_4xxx_wp_video_start,
int ls_oe_gpio;
int hpd_gpio;
- struct omap_dss_output output;
+ bool core_enabled;
+
+ struct omap_dss_device output;
} hdmi;
/*
WARN_ON(r < 0 && r != -ENOSYS);
}
+static int hdmi_init_regulator(void)
+{
+ struct regulator *reg;
+
+ if (hdmi.vdda_hdmi_dac_reg != NULL)
+ return 0;
+
+ reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+
+ /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(reg))
+ reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
+ if (IS_ERR(reg)) {
+ DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ hdmi.vdda_hdmi_dac_reg = reg;
+
+ return 0;
+}
+
static int hdmi_init_display(struct omap_dss_device *dssdev)
{
int r;
dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
- if (hdmi.vdda_hdmi_dac_reg == NULL) {
- struct regulator *reg;
-
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
-
- /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
- if (IS_ERR(reg))
- reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
-
- if (IS_ERR(reg)) {
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi.vdda_hdmi_dac_reg = reg;
- }
+ r = hdmi_init_regulator();
+ if (r)
+ return r;
r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
if (r)
}
-unsigned long hdmi_get_pixel_clock(void)
-{
- /* HDMI Pixel Clock in Mhz */
- return hdmi.ip_data.cfg.timings.pixel_clock * 1000;
-}
-
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
struct hdmi_pll_info *pi)
{
{
int r;
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
- gpio_set_value(hdmi.ls_oe_gpio, 1);
+ if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
+ if (gpio_is_valid(hdmi.ls_oe_gpio))
+ gpio_set_value(hdmi.ls_oe_gpio, 1);
/* wait 300us after CT_CP_HPD for the 5V power output to reach 90% */
udelay(300);
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+ hdmi.core_enabled = true;
+
return 0;
err_runtime_get:
regulator_disable(hdmi.vdda_hdmi_dac_reg);
err_vdac_enable:
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ if (gpio_is_valid(hdmi.ls_oe_gpio))
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
return r;
}
static void hdmi_power_off_core(struct omap_dss_device *dssdev)
{
+ hdmi.core_enabled = false;
+
hdmi_runtime_put();
regulator_disable(hdmi.vdda_hdmi_dac_reg);
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ if (gpio_is_valid(hdmi.ls_oe_gpio))
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
}
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
unsigned long phy;
r = hdmi_power_on_core(dssdev);
static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
dss_mgr_disable(mgr);
if (t != NULL)
hdmi.ip_data.cfg = *t;
+ dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+
mutex_unlock(&hdmi.lock);
}
+static void omapdss_hdmi_display_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ const struct hdmi_config *cfg;
+
+ cfg = hdmi_get_timings();
+ if (cfg == NULL)
+ cfg = &vesa_timings[0];
+
+ memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+}
+
static void hdmi_dump_regs(struct seq_file *s)
{
mutex_lock(&hdmi.lock);
r = hdmi_runtime_get();
BUG_ON(r);
- r = hdmi.ip_data.ops->detect(&hdmi.ip_data);
+ r = gpio_get_value(hdmi.hpd_gpio);
hdmi_runtime_put();
mutex_unlock(&hdmi.lock);
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_device *out = &hdmi.output;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
goto err0;
}
- hdmi.ip_data.hpd_gpio = hdmi.hpd_gpio;
-
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err0;
- }
-
r = hdmi_power_on_full(dssdev);
if (r) {
DSSERR("failed to power on device\n");
- goto err1;
+ goto err0;
}
mutex_unlock(&hdmi.lock);
return 0;
-err1:
- omap_dss_stop_device(dssdev);
err0:
mutex_unlock(&hdmi.lock);
return r;
hdmi_power_off_full(dssdev);
- omap_dss_stop_device(dssdev);
-
mutex_unlock(&hdmi.lock);
}
mutex_lock(&hdmi.lock);
- hdmi.ip_data.hpd_gpio = hdmi.hpd_gpio;
-
r = hdmi_power_on_core(dssdev);
if (r) {
DSSERR("failed to power on device\n");
return 0;
}
+static int hdmi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
+
+ r = hdmi_init_regulator();
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void hdmi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static int hdmi_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ bool need_enable;
+ int r;
+
+ need_enable = hdmi.core_enabled == false;
+
+ if (need_enable) {
+ r = omapdss_hdmi_core_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ r = omapdss_hdmi_read_edid(edid, len);
+
+ if (need_enable)
+ omapdss_hdmi_core_disable(dssdev);
+
+ return r;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int omapdss_hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio()) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_enable();
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void omapdss_hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+ hdmi_audio_disable();
+}
+
+static int omapdss_hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return hdmi_audio_start();
+}
+
+static void omapdss_hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+ hdmi_audio_stop();
+}
+
+static bool omapdss_hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ bool r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_mode_has_audio();
+
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int omapdss_hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio()) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_config(audio);
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+#else
+static int omapdss_hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void omapdss_hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int omapdss_hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void omapdss_hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool omapdss_hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ return false;
+}
+
+static int omapdss_hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ return -EPERM;
+}
+#endif
+
+static const struct omapdss_hdmi_ops hdmi_ops = {
+ .connect = hdmi_connect,
+ .disconnect = hdmi_disconnect,
+
+ .enable = omapdss_hdmi_display_enable,
+ .disable = omapdss_hdmi_display_disable,
+
+ .check_timings = omapdss_hdmi_display_check_timing,
+ .set_timings = omapdss_hdmi_display_set_timing,
+ .get_timings = omapdss_hdmi_display_get_timings,
+
+ .read_edid = hdmi_read_edid,
+
+ .audio_enable = omapdss_hdmi_audio_enable,
+ .audio_disable = omapdss_hdmi_audio_disable,
+ .audio_start = omapdss_hdmi_audio_start,
+ .audio_stop = omapdss_hdmi_audio_stop,
+ .audio_supported = omapdss_hdmi_audio_supported,
+ .audio_config = omapdss_hdmi_audio_config,
+};
+
static void hdmi_init_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi.output;
- out->pdev = pdev;
+ out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->type = OMAP_DISPLAY_TYPE_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops.hdmi = &hdmi_ops;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void __exit hdmi_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi.output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
/* HDMI HW IP initialisation */
mutex_init(&hdmi.ip_data.lock);
res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DSSERR("can't get IORESOURCE_MEM HDMI\n");
- return -EINVAL;
- }
/* Base address taken from platform */
hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hdmi.ip_data.base_wp))
return PTR_ERR(hdmi.ip_data.base_wp);
+ hdmi.ip_data.irq = platform_get_irq(pdev, 0);
+ if (hdmi.ip_data.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ return -ENODEV;
+ }
+
r = hdmi_get_clocks(pdev);
if (r) {
DSSERR("can't get clocks\n");
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
+ hdmi.ct_cp_hpd_gpio = -1;
+ hdmi.ls_oe_gpio = -1;
+ hdmi.hpd_gpio = -1;
+
hdmi_init_output(pdev);
r = hdmi_panel_init();
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
- r = hdmi_probe_pdata(pdev);
- if (r) {
- hdmi_panel_exit();
- hdmi_uninit_output(pdev);
- pm_runtime_disable(&pdev->dev);
- return r;
+ if (pdev->dev.platform_data) {
+ r = hdmi_probe_pdata(pdev);
+ if (r)
+ goto err_probe;
}
return 0;
+
+err_probe:
+ hdmi_panel_exit();
+ hdmi_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
}
static int __exit hdmi_remove_child(struct device *dev, void *data)
int r = 0;
size_t len = size;
struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *old_dssdev;
int match(struct omap_dss_device *dssdev, void *data)
{
if (len > 0 && dssdev == NULL)
return -EINVAL;
- if (dssdev)
+ if (dssdev) {
DSSDBG("display %s found\n", dssdev->name);
- if (mgr->output) {
- r = mgr->unset_output(mgr);
- if (r) {
- DSSERR("failed to unset current output\n");
+ if (omapdss_device_is_connected(dssdev)) {
+ DSSERR("new display is already connected\n");
+ r = -EINVAL;
+ goto put_device;
+ }
+
+ if (omapdss_device_is_enabled(dssdev)) {
+ DSSERR("new display is not disabled\n");
+ r = -EINVAL;
goto put_device;
}
}
- if (dssdev) {
- struct omap_dss_output *out = dssdev->output;
-
- /*
- * a registered device should have an output connected to it
- * already
- */
- if (!out) {
- DSSERR("device has no output connected to it\n");
+ old_dssdev = mgr->get_device(mgr);
+ if (old_dssdev) {
+ if (omapdss_device_is_enabled(old_dssdev)) {
+ DSSERR("old display is not disabled\n");
+ r = -EINVAL;
goto put_device;
}
- r = mgr->set_output(mgr, out);
+ old_dssdev->driver->disconnect(old_dssdev);
+ }
+
+ if (dssdev) {
+ r = dssdev->driver->connect(dssdev);
if (r) {
- DSSERR("failed to set manager output\n");
+ DSSERR("failed to connect new device\n");
+ goto put_device;
+ }
+
+ old_dssdev = mgr->get_device(mgr);
+ if (old_dssdev != dssdev) {
+ DSSERR("failed to connect device to this manager\n");
+ dssdev->driver->disconnect(dssdev);
goto put_device;
}
{
kobject_del(&mgr->kobj);
kobject_put(&mgr->kobj);
+
+ memset(&mgr->kobj, 0, sizeof(mgr->kobj));
}
static int num_managers;
static struct omap_overlay_manager *managers;
-int dss_init_overlay_managers(struct platform_device *pdev)
+int dss_init_overlay_managers(void)
{
- int i, r;
+ int i;
num_managers = dss_feat_get_num_mgrs();
dss_feat_get_supported_outputs(mgr->id);
INIT_LIST_HEAD(&mgr->overlays);
+ }
+
+ return 0;
+}
+
+int dss_init_overlay_managers_sysfs(struct platform_device *pdev)
+{
+ int i, r;
+
+ for (i = 0; i < num_managers; ++i) {
+ struct omap_overlay_manager *mgr = &managers[i];
r = dss_manager_kobj_init(mgr, pdev);
if (r)
return 0;
}
-void dss_uninit_overlay_managers(struct platform_device *pdev)
+void dss_uninit_overlay_managers(void)
+{
+ kfree(managers);
+ managers = NULL;
+ num_managers = 0;
+}
+
+void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev)
{
int i;
for (i = 0; i < num_managers; ++i) {
struct omap_overlay_manager *mgr = &managers[i];
+
dss_manager_kobj_uninit(mgr);
}
-
- kfree(managers);
- managers = NULL;
- num_managers = 0;
}
int omap_dss_get_num_overlay_managers(void)
static LIST_HEAD(output_list);
static DEFINE_MUTEX(output_lock);
-int omapdss_output_set_device(struct omap_dss_output *out,
+int omapdss_output_set_device(struct omap_dss_device *out,
struct omap_dss_device *dssdev)
{
int r;
goto err;
}
- if (out->type != dssdev->type) {
+ if (out->output_type != dssdev->type) {
DSSERR("output type and display type don't match\n");
r = -EINVAL;
goto err;
}
EXPORT_SYMBOL(omapdss_output_set_device);
-int omapdss_output_unset_device(struct omap_dss_output *out)
+int omapdss_output_unset_device(struct omap_dss_device *out)
{
int r;
}
EXPORT_SYMBOL(omapdss_output_unset_device);
-void dss_register_output(struct omap_dss_output *out)
+int omapdss_register_output(struct omap_dss_device *out)
{
list_add_tail(&out->list, &output_list);
+ return 0;
}
+EXPORT_SYMBOL(omapdss_register_output);
-void dss_unregister_output(struct omap_dss_output *out)
+void omapdss_unregister_output(struct omap_dss_device *out)
{
list_del(&out->list);
}
+EXPORT_SYMBOL(omapdss_unregister_output);
-struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id)
{
- struct omap_dss_output *out;
+ struct omap_dss_device *out;
list_for_each_entry(out, &output_list, list) {
if (out->id == id)
}
EXPORT_SYMBOL(omap_dss_get_output);
+struct omap_dss_device *omap_dss_find_output(const char *name)
+{
+ struct omap_dss_device *out;
+
+ list_for_each_entry(out, &output_list, list) {
+ if (strcmp(out->name, name) == 0)
+ return omap_dss_get_device(out);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_dss_find_output);
+
+struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node)
+{
+ struct omap_dss_device *out;
+
+ list_for_each_entry(out, &output_list, list) {
+ if (out->dev->of_node == node)
+ return omap_dss_get_device(out);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_dss_find_output_by_node);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
+{
+ while (dssdev->output)
+ dssdev = dssdev->output;
+
+ if (dssdev->id != 0)
+ return omap_dss_get_device(dssdev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(omapdss_find_output_from_display);
+
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev)
+{
+ struct omap_dss_device *out;
+ struct omap_overlay_manager *mgr;
+
+ out = omapdss_find_output_from_display(dssdev);
+
+ if (out == NULL)
+ return NULL;
+
+ mgr = out->manager;
+
+ omap_dss_put_device(out);
+
+ return mgr;
+}
+EXPORT_SYMBOL(omapdss_find_mgr_from_display);
+
static const struct dss_mgr_ops *dss_mgr_ops;
int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
+int dss_mgr_connect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ return dss_mgr_ops->connect(mgr, dst);
+}
+EXPORT_SYMBOL(dss_mgr_connect);
+
+void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst)
+{
+ dss_mgr_ops->disconnect(mgr, dst);
+}
+EXPORT_SYMBOL(dss_mgr_disconnect);
+
void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
const struct omap_video_timings *timings)
{
int data_lines;
struct rfbi_timings intf_timings;
- struct omap_dss_output output;
+ struct omap_dss_device output;
} rfbi;
static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
{
u32 l;
int r;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = rfbi.output.manager;
u16 width = rfbi.timings.x_res;
u16 height = rfbi.timings.y_res;
static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = rfbi.output.manager;
struct dss_lcd_mgr_config mgr_config;
mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI;
int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_device *out = &rfbi.output;
int r;
if (out == NULL || out->manager == NULL) {
if (r)
return r;
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err0;
- }
-
r = dss_mgr_register_framedone_handler(out->manager,
framedone_callback, NULL);
if (r) {
return 0;
err1:
- omap_dss_stop_device(dssdev);
-err0:
rfbi_runtime_put();
return r;
}
void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_device *out = &rfbi.output;
dss_mgr_unregister_framedone_handler(out->manager,
framedone_callback, NULL);
- omap_dss_stop_device(dssdev);
rfbi_runtime_put();
}
static void rfbi_init_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &rfbi.output;
+ struct omap_dss_device *out = &rfbi.output;
- out->pdev = pdev;
+ out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_DBI;
- out->type = OMAP_DISPLAY_TYPE_DBI;
+ out->output_type = OMAP_DISPLAY_TYPE_DBI;
out->name = "rfbi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void __exit rfbi_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &rfbi.output;
+ struct omap_dss_device *out = &rfbi.output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
/* RFBI HW IP initialisation */
rfbi_init_output(pdev);
- r = rfbi_probe_pdata(pdev);
- if (r) {
- rfbi_uninit_output(pdev);
- pm_runtime_disable(&pdev->dev);
- return r;
+ if (pdev->dev.platform_data) {
+ r = rfbi_probe_pdata(pdev);
+ if (r)
+ goto err_probe;
}
return 0;
+err_probe:
+ rfbi_uninit_output(pdev);
err_runtime_get:
pm_runtime_disable(&pdev->dev);
return r;
#include "dss.h"
static struct {
+ struct platform_device *pdev;
+
bool update_enabled;
struct regulator *vdds_sdi_reg;
struct omap_video_timings timings;
int datapairs;
- struct omap_dss_output output;
+ struct omap_dss_device output;
} sdi;
struct sdi_clk_calc_ctx {
static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = sdi.output.manager;
sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_device *out = &sdi.output;
struct omap_video_timings *t = &sdi.timings;
struct dss_clock_info dss_cinfo;
struct dispc_clock_info dispc_cinfo;
return -ENODEV;
}
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err_start_dev;
- }
-
r = regulator_enable(sdi.vdds_sdi_reg);
if (r)
goto err_reg_enable;
err_get_dispc:
regulator_disable(sdi.vdds_sdi_reg);
err_reg_enable:
- omap_dss_stop_device(dssdev);
-err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = sdi.output.manager;
dss_mgr_disable(mgr);
dispc_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
-
- omap_dss_stop_device(dssdev);
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);
}
EXPORT_SYMBOL(omapdss_sdi_set_timings);
+static void sdi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = sdi.timings;
+}
+
+static int sdi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct omap_overlay_manager *mgr = sdi.output.manager;
+
+ if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
+ return -EINVAL;
+
+ if (timings->pixel_clock == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
{
sdi.datapairs = datapairs;
}
EXPORT_SYMBOL(omapdss_sdi_set_datapairs);
-static int sdi_init_display(struct omap_dss_device *dssdev)
+static int sdi_init_regulator(void)
{
- DSSDBG("SDI init\n");
+ struct regulator *vdds_sdi;
- if (sdi.vdds_sdi_reg == NULL) {
- struct regulator *vdds_sdi;
+ if (sdi.vdds_sdi_reg)
+ return 0;
- vdds_sdi = dss_get_vdds_sdi();
+ vdds_sdi = dss_get_vdds_sdi();
+ if (IS_ERR(vdds_sdi)) {
+ vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
DSSERR("can't get VDDS_SDI regulator\n");
return PTR_ERR(vdds_sdi);
}
-
- sdi.vdds_sdi_reg = vdds_sdi;
}
+ sdi.vdds_sdi_reg = vdds_sdi;
+
return 0;
}
dss_copy_device_pdata(dssdev, plat_dssdev);
- r = sdi_init_display(dssdev);
+ r = sdi_init_regulator();
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
return 0;
}
+static int sdi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ r = sdi_init_regulator();
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void sdi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static const struct omapdss_sdi_ops sdi_ops = {
+ .connect = sdi_connect,
+ .disconnect = sdi_disconnect,
+
+ .enable = omapdss_sdi_display_enable,
+ .disable = omapdss_sdi_display_disable,
+
+ .check_timings = sdi_check_timings,
+ .set_timings = omapdss_sdi_set_timings,
+ .get_timings = sdi_get_timings,
+
+ .set_datapairs = omapdss_sdi_set_datapairs,
+};
+
static void sdi_init_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &sdi.output;
+ struct omap_dss_device *out = &sdi.output;
- out->pdev = pdev;
+ out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
- out->type = OMAP_DISPLAY_TYPE_SDI;
+ out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ out->ops.sdi = &sdi_ops;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void __exit sdi_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &sdi.output;
+ struct omap_dss_device *out = &sdi.output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
static int omap_sdi_probe(struct platform_device *pdev)
{
int r;
+ sdi.pdev = pdev;
+
sdi_init_output(pdev);
- r = sdi_probe_pdata(pdev);
- if (r) {
- sdi_uninit_output(pdev);
- return r;
+ if (pdev->dev.platform_data) {
+ r = sdi_probe_pdata(pdev);
+ if (r)
+ goto err_probe;
}
return 0;
+
+err_probe:
+ sdi_uninit_output(pdev);
+ return r;
}
static int __exit omap_sdi_remove(struct platform_device *pdev)
int (*read_edid)(struct hdmi_ip_data *ip_data, u8 *edid, int len);
- bool (*detect)(struct hdmi_ip_data *ip_data);
-
int (*pll_enable)(struct hdmi_ip_data *ip_data);
void (*pll_disable)(struct hdmi_ip_data *ip_data);
unsigned long core_av_offset;
unsigned long pll_offset;
unsigned long phy_offset;
+ int irq;
const struct ti_hdmi_ip_ops *ops;
struct hdmi_config cfg;
struct hdmi_pll_info pll_data;
struct hdmi_core_infoframe_avi avi_cfg;
/* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
- int hpd_gpio;
struct mutex lock;
};
int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/seq_file.h>
-#include <linux/gpio.h>
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
#include <sound/asound.h>
#include <sound/asoundef.h>
#include "dss.h"
#include "dss_features.h"
+#define HDMI_IRQ_LINK_CONNECT (1 << 25)
+#define HDMI_IRQ_LINK_DISCONNECT (1 << 26)
+
static inline void hdmi_write_reg(void __iomem *base_addr,
const u16 idx, u32 val)
{
hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
}
-static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
+static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
- bool hpd;
- int r;
-
- mutex_lock(&ip_data->lock);
-
- hpd = gpio_get_value(ip_data->hpd_gpio);
+ struct hdmi_ip_data *ip_data = data;
+ void __iomem *wp_base = hdmi_wp_base(ip_data);
+ u32 irqstatus;
+
+ irqstatus = hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
+ hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS, irqstatus);
+ /* flush posted write */
+ hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
+
+ if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
+ irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ /*
+ * If we get both connect and disconnect interrupts at the same
+ * time, turn off the PHY, clear interrupts, and restart, which
+ * raises a connect interrupt if a cable is connected, or nothing
+ * if the cable is not connected.
+ */
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
- if (hpd)
- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
- else
- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
+ hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS,
+ HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
+ /* flush posted write */
+ hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
- if (r) {
- DSSERR("Failed to %s PHY TX power\n",
- hpd ? "enable" : "disable");
- goto err;
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
+ } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
+ } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
}
-err:
- mutex_unlock(&ip_data->lock);
- return r;
-}
-
-static irqreturn_t hpd_irq_handler(int irq, void *data)
-{
- struct hdmi_ip_data *ip_data = data;
-
- hdmi_check_hpd_state(ip_data);
-
return IRQ_HANDLED;
}
u16 r = 0;
void __iomem *phy_base = hdmi_phy_base(ip_data);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_CLR,
+ 0xffffffff);
+
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQSTATUS,
+ HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
+
r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
if (r)
return r;
/* Write to phy address 3 to change the polarity control */
REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
- r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
- NULL, hpd_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT, "hpd", ip_data);
+ r = request_threaded_irq(ip_data->irq, NULL, hdmi_irq_handler,
+ IRQF_ONESHOT, "OMAP HDMI", ip_data);
if (r) {
- DSSERR("HPD IRQ request failed\n");
+ DSSERR("HDMI IRQ request failed\n");
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
return r;
}
- r = hdmi_check_hpd_state(ip_data);
- if (r) {
- free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
- return r;
- }
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET,
+ HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
}
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
{
- free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
+ free_irq(ip_data->irq, ip_data);
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
}
return l;
}
-bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data)
-{
- return gpio_get_value(ip_data->hpd_gpio);
-}
-
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
struct hdmi_core_infoframe_avi *avi_cfg,
struct hdmi_core_packet_enable_repeat *repeat_cfg)
#define HDMI_WP_IRQSTATUS 0x28
#define HDMI_WP_PWR_CTRL 0x40
#define HDMI_WP_IRQENABLE_SET 0x2C
+#define HDMI_WP_IRQENABLE_CLR 0x30
#define HDMI_WP_VIDEO_CFG 0x50
#define HDMI_WP_VIDEO_SIZE 0x60
#define HDMI_WP_VIDEO_TIMING_H 0x68
enum omap_dss_venc_type type;
bool invert_polarity;
- struct omap_dss_output output;
+ struct omap_dss_device output;
} venc;
static inline void venc_write_reg(int idx, u32 val)
static int venc_power_on(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = venc.output.manager;
u32 l;
int r;
static void venc_power_off(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = venc.output.manager;
venc_write_reg(VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(0);
venc_runtime_put();
}
-unsigned long venc_get_pixel_clock(void)
-{
- /* VENC Pixel Clock in Mhz */
- return 13500000;
-}
-
int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_device *out = &venc.output;
int r;
DSSDBG("venc_display_enable\n");
goto err0;
}
- r = omap_dss_start_device(dssdev);
- if (r) {
- DSSERR("failed to start device\n");
- goto err0;
- }
-
r = venc_power_on(dssdev);
if (r)
- goto err1;
+ goto err0;
venc.wss_data = 0;
mutex_unlock(&venc.venc_lock);
return 0;
-err1:
- omap_dss_stop_device(dssdev);
err0:
mutex_unlock(&venc.venc_lock);
return r;
venc_power_off(dssdev);
- omap_dss_stop_device(dssdev);
-
mutex_unlock(&venc.venc_lock);
}
venc.timings = *timings;
+ dispc_set_tv_pclk(13500000);
+
mutex_unlock(&venc.venc_lock);
}
return -EINVAL;
}
+static void venc_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ mutex_lock(&venc.venc_lock);
+
+ *timings = venc.timings;
+
+ mutex_unlock(&venc.venc_lock);
+}
+
u32 omapdss_venc_get_wss(struct omap_dss_device *dssdev)
{
/* Invert due to VENC_L21_WC_CTL:INV=1 */
mutex_unlock(&venc.venc_lock);
}
-static int venc_init_display(struct omap_dss_device *dssdev)
+static int venc_init_regulator(void)
{
- DSSDBG("init_display\n");
-
- if (venc.vdda_dac_reg == NULL) {
- struct regulator *vdda_dac;
+ struct regulator *vdda_dac;
- vdda_dac = regulator_get(&venc.pdev->dev, "vdda_dac");
+ if (venc.vdda_dac_reg != NULL)
+ return 0;
- if (IS_ERR(vdda_dac)) {
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(vdda_dac);
- }
+ vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
- venc.vdda_dac_reg = vdda_dac;
+ if (IS_ERR(vdda_dac)) {
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(vdda_dac);
}
+ venc.vdda_dac_reg = vdda_dac;
+
return 0;
}
if (!plat_dssdev)
return 0;
+ r = venc_init_regulator();
+ if (r)
+ return r;
+
dssdev = dss_alloc_and_init_device(&vencdev->dev);
if (!dssdev)
return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
- r = venc_init_display(dssdev);
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
r = omapdss_output_set_device(&venc.output, dssdev);
if (r) {
DSSERR("failed to connect output to new device: %s\n",
return 0;
}
+static int venc_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ r = venc_init_regulator();
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void venc_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->device);
+
+ if (dst != dssdev->device)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static const struct omapdss_atv_ops venc_ops = {
+ .connect = venc_connect,
+ .disconnect = venc_disconnect,
+
+ .enable = omapdss_venc_display_enable,
+ .disable = omapdss_venc_display_disable,
+
+ .check_timings = omapdss_venc_check_timings,
+ .set_timings = omapdss_venc_set_timings,
+ .get_timings = venc_get_timings,
+
+ .set_type = omapdss_venc_set_type,
+ .invert_vid_out_polarity = omapdss_venc_invert_vid_out_polarity,
+
+ .set_wss = omapdss_venc_set_wss,
+ .get_wss = omapdss_venc_get_wss,
+};
+
static void venc_init_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &venc.output;
+ struct omap_dss_device *out = &venc.output;
- out->pdev = pdev;
+ out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
- out->type = OMAP_DISPLAY_TYPE_VENC;
+ out->output_type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops.atv = &venc_ops;
+ out->owner = THIS_MODULE;
- dss_register_output(out);
+ omapdss_register_output(out);
}
static void __exit venc_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_output *out = &venc.output;
+ struct omap_dss_device *out = &venc.output;
- dss_unregister_output(out);
+ omapdss_unregister_output(out);
}
/* VENC HW IP initialisation */
venc_init_output(pdev);
- r = venc_probe_pdata(pdev);
- if (r) {
- venc_panel_exit();
- venc_uninit_output(pdev);
- pm_runtime_disable(&pdev->dev);
- return r;
+ if (pdev->dev.platform_data) {
+ r = venc_probe_pdata(pdev);
+ if (r)
+ goto err_probe;
}
return 0;
+err_probe:
+ venc_panel_exit();
+ venc_uninit_output(pdev);
err_panel_init:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
{
dss_unregister_child_devices(&pdev->dev);
- if (venc.vdda_dac_reg != NULL) {
- regulator_put(venc.vdda_dac_reg);
- venc.vdda_dac_reg = NULL;
- }
-
venc_panel_exit();
venc_uninit_output(pdev);
dssdev->panel.timings = default_timings;
- return device_create_file(&dssdev->dev, &dev_attr_output_type);
+ return device_create_file(dssdev->dev, &dev_attr_output_type);
}
static void venc_panel_remove(struct omap_dss_device *dssdev)
{
- device_remove_file(&dssdev->dev, &dev_attr_output_type);
+ device_remove_file(dssdev->dev, &dev_attr_output_type);
}
static int venc_panel_enable(struct omap_dss_device *dssdev)
{
int r;
- dev_dbg(&dssdev->dev, "venc_panel_enable\n");
+ dev_dbg(dssdev->dev, "venc_panel_enable\n");
mutex_lock(&venc_panel.lock);
static void venc_panel_disable(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "venc_panel_disable\n");
+ dev_dbg(dssdev->dev, "venc_panel_disable\n");
mutex_lock(&venc_panel.lock);
static void venc_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- dev_dbg(&dssdev->dev, "venc_panel_set_timings\n");
+ dev_dbg(dssdev->dev, "venc_panel_set_timings\n");
mutex_lock(&venc_panel.lock);
static int venc_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- dev_dbg(&dssdev->dev, "venc_panel_check_timings\n");
+ dev_dbg(dssdev->dev, "venc_panel_check_timings\n");
return omapdss_venc_check_timings(dssdev, timings);
}
static u32 venc_panel_get_wss(struct omap_dss_device *dssdev)
{
- dev_dbg(&dssdev->dev, "venc_panel_get_wss\n");
+ dev_dbg(dssdev->dev, "venc_panel_get_wss\n");
return omapdss_venc_get_wss(dssdev);
}
static int venc_panel_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
- dev_dbg(&dssdev->dev, "venc_panel_set_wss\n");
+ dev_dbg(dssdev->dev, "venc_panel_set_wss\n");
return omapdss_venc_set_wss(dssdev, wss);
}
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
- if (!display || !display->output || !display->output->manager) {
+
+ if (!display) {
r = -EINVAL;
break;
}
- mgr = display->output->manager;
+ mgr = omapdss_find_mgr_from_display(display);
+ if (!mgr) {
+ r = -EINVAL;
+ break;
+ }
r = mgr->wait_for_vsync(mgr);
break;
if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
dssdev->driver->disable(dssdev);
+ dssdev->driver->disconnect(dssdev);
+
omap_dss_put_device(dssdev);
}
int i, r;
struct omap_overlay_manager *mgr;
- if (!def_dssdev->output) {
- dev_err(fbdev->dev, "no output for the default display\n");
- return -EINVAL;
+ r = def_dssdev->driver->connect(def_dssdev);
+ if (r) {
+ dev_err(fbdev->dev, "failed to connect default display\n");
+ return r;
}
for (i = 0; i < fbdev->num_displays; ++i) {
struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
- struct omap_dss_output *out = dssdev->output;
-
- mgr = omap_dss_get_overlay_manager(out->dispc_channel);
- if (!mgr || !out)
+ if (dssdev == def_dssdev)
continue;
- if (mgr->output)
- mgr->unset_output(mgr);
-
- mgr->set_output(mgr, out);
+ /*
+ * We don't care if the connect succeeds or not. We just want to
+ * connect as many displays as possible.
+ */
+ dssdev->driver->connect(dssdev);
}
- mgr = def_dssdev->output->manager;
+ mgr = omapdss_find_mgr_from_display(def_dssdev);
if (!mgr) {
dev_err(fbdev->dev, "no ovl manager for the default display\n");
DBG("omapfb_probe\n");
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
if (pdev->num_resources != 0) {
dev_err(&pdev->dev, "probed for an unknown device\n");
r = -ENODEV;
if (def_display == NULL) {
dev_err(fbdev->dev, "failed to find default display\n");
- r = -EINVAL;
+ r = -EPROBE_DEFER;
goto cleanup;
}
/* first resource is the register res, the rest are vrfb contexts */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "can't get vrfb base address\n");
- return -EINVAL;
- }
-
vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(vrfb_base))
return PTR_ERR(vrfb_base);
r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
- info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT,
+ info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT),
vma->vm_start);
return r;
--- /dev/null
+/*
+ * Simplest possible simple frame-buffer driver, as a platform device
+ *
+ * Copyright (c) 2013, Stephen Warren
+ *
+ * Based on q40fb.c, which was:
+ * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org>
+ *
+ * Also based on offb.c, which was:
+ * Copyright (C) 1997 Geert Uytterhoeven
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct fb_fix_screeninfo simplefb_fix = {
+ .id = "simple",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo simplefb_var = {
+ .height = -1,
+ .width = -1,
+ .activate = FB_ACTIVATE_NOW,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ u32 *pal = info->pseudo_palette;
+ u32 cr = red >> (16 - info->var.red.length);
+ u32 cg = green >> (16 - info->var.green.length);
+ u32 cb = blue >> (16 - info->var.blue.length);
+ u32 value;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ value = (cr << info->var.red.offset) |
+ (cg << info->var.green.offset) |
+ (cb << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+ pal[regno] = value;
+
+ return 0;
+}
+
+static struct fb_ops simplefb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = simplefb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+struct simplefb_format {
+ const char *name;
+ u32 bits_per_pixel;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+};
+
+static struct simplefb_format simplefb_formats[] = {
+ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
+};
+
+struct simplefb_params {
+ u32 width;
+ u32 height;
+ u32 stride;
+ struct simplefb_format *format;
+};
+
+static int simplefb_parse_dt(struct platform_device *pdev,
+ struct simplefb_params *params)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ const char *format;
+ int i;
+
+ ret = of_property_read_u32(np, "width", ¶ms->width);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't parse width property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "height", ¶ms->height);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't parse height property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "stride", ¶ms->stride);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't parse stride property\n");
+ return ret;
+ }
+
+ ret = of_property_read_string(np, "format", &format);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't parse format property\n");
+ return ret;
+ }
+ params->format = NULL;
+ for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
+ if (strcmp(format, simplefb_formats[i].name))
+ continue;
+ params->format = &simplefb_formats[i];
+ break;
+ }
+ if (!params->format) {
+ dev_err(&pdev->dev, "Invalid format value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int simplefb_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct simplefb_params params;
+ struct fb_info *info;
+ struct resource *mem;
+
+ if (fb_get_options("simplefb", NULL))
+ return -ENODEV;
+
+ ret = simplefb_parse_dt(pdev, &params);
+ if (ret)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ return -EINVAL;
+ }
+
+ info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
+ if (!info)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, info);
+
+ info->fix = simplefb_fix;
+ info->fix.smem_start = mem->start;
+ info->fix.smem_len = resource_size(mem);
+ info->fix.line_length = params.stride;
+
+ info->var = simplefb_var;
+ info->var.xres = params.width;
+ info->var.yres = params.height;
+ info->var.xres_virtual = params.width;
+ info->var.yres_virtual = params.height;
+ info->var.bits_per_pixel = params.format->bits_per_pixel;
+ info->var.red = params.format->red;
+ info->var.green = params.format->green;
+ info->var.blue = params.format->blue;
+ info->var.transp = params.format->transp;
+
+ info->fbops = &simplefb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
+ info->fix.smem_len);
+ if (!info->screen_base) {
+ framebuffer_release(info);
+ return -ENODEV;
+ }
+ info->pseudo_palette = (void *)(info + 1);
+
+ ret = register_framebuffer(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+ framebuffer_release(info);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
+
+ return 0;
+}
+
+static int simplefb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+static const struct of_device_id simplefb_of_match[] = {
+ { .compatible = "simple-framebuffer", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, simplefb_of_match);
+
+static struct platform_driver simplefb_driver = {
+ .driver = {
+ .name = "simple-framebuffer",
+ .owner = THIS_MODULE,
+ .of_match_table = simplefb_of_match,
+ },
+ .probe = simplefb_probe,
+ .remove = simplefb_remove,
+};
+module_platform_driver(simplefb_driver);
+
+MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
+MODULE_DESCRIPTION("Simple framebuffer driver");
+MODULE_LICENSE("GPL v2");
platform_set_drvdata(pdev, hdq_data);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(&pdev->dev, "unable to get resource\n");
- return -ENXIO;
- }
-
hdq_data->hdq_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource found\n");
- return -EINVAL;
- }
-
wdt_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
dev_info(dev, "heartbeat %d sec\n", heartbeat);
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (wdt_mem == NULL) {
- dev_err(dev, "failed to get memory region resource\n");
- return -ENOENT;
- }
-
wdt_base = devm_ioremap_resource(dev, wdt_mem);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't get device resources\n");
- return -ENODEV;
- }
-
imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(imx2_wdt.base))
return PTR_ERR(imx2_wdt.base);
by the current usage of anonymous memory ("committed AS") and
controlled by various sysfs-settable parameters. Configuring
FRONTSWAP is highly recommended; if it is not configured, self-
- ballooning is disabled by default but can be enabled with the
- 'selfballooning' kernel boot parameter. If FRONTSWAP is configured,
+ ballooning is disabled by default. If FRONTSWAP is configured,
frontswap-selfshrinking is enabled by default but can be disabled
- with the 'noselfshrink' kernel boot parameter; and self-ballooning
- is enabled by default but can be disabled with the 'noselfballooning'
+ with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
+ is enabled by default but can be disabled with the 'tmem.selfballooning=0'
kernel boot parameter. Note that systems without a sufficiently
large swap device should not enable self-ballooning.
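
 For illustration only (assuming tmem.c is built in, so its parameter
 prefix is "tmem." as the help text above uses), the renamed knobs become
 ordinary kernel parameters on the Xen guest command line:

	tmem tmem.selfballooning=0	# tmem on, self-ballooning off
	tmem tmem.frontswap=0		# tmem on, frontswap backend off

 When xen-tmem is built as a module instead, the same names
 (selfballooning, selfshrinking, frontswap, cleancache) are passed as
 module parameters.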
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- if ((page = alloc_page(gfp)) == NULL) {
+ page = alloc_page(gfp);
+ if (page == NULL) {
nr_pages = i;
state = BP_EAGAIN;
break;
struct page **pages = vma->vm_private_data;
int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
+ if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
xen_unmap_domain_mfn_range(vma, numpgs, pages);
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
-#endif
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+ tmem_enabled = true;
+ return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
- tmem_enabled = true;
- return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
- disable_cleancache = true;
- return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
static struct cleancache_ops tmem_cleancache_ops = {
.put_page = tmem_cleancache_put_page,
.get_page = tmem_cleancache_get_page,
xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
- disable_frontswap = true;
- return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
static struct frontswap_ops tmem_frontswap_ops = {
.store = tmem_frontswap_store,
.load = tmem_frontswap_load,
.invalidate_area = tmem_frontswap_flush_area,
.init = tmem_frontswap_init
};
-#else /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
#endif
static int xen_tmem_init(void)
if (!xen_domain())
return 0;
#ifdef CONFIG_FRONTSWAP
- if (tmem_enabled && !disable_frontswap) {
+ if (tmem_enabled && frontswap) {
char *s = "";
struct frontswap_ops *old_ops =
frontswap_register_ops(&tmem_frontswap_ops);
#endif
#ifdef CONFIG_CLEANCACHE
BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
- if (tmem_enabled && !disable_cleancache) {
+ if (tmem_enabled && cleancache) {
char *s = "";
struct cleancache_ops *old_ops =
cleancache_register_ops(&tmem_cleancache_ops);
}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
- xen_selfballoon_init(!disable_selfballooning,
- !disable_frontswap_selfshrinking);
+ /*
+ * There is no point in driving pages to the swap system if they
+ * aren't going anywhere in the tmem universe.
+ */
+ if (!frontswap) {
+ selfshrinking = false;
+ selfballooning = false;
+ }
+ xen_selfballoon_init(selfballooning, selfshrinking);
#endif
return 0;
}
else
pci_restore_state(dev);
- if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+ if (dev->msix_cap) {
struct physdev_pci_device ppdev = {
.seg = pci_domain_nr(dev->bus),
.bus = dev->bus->number,
if (err)
goto config_release;
- if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+ if (dev->msix_cap) {
struct physdev_pci_device ppdev = {
.seg = pci_domain_nr(dev->bus),
.bus = dev->bus->number,
* System configuration note: Selfballooning should not be enabled on
* systems without a sufficiently large swap device configured; for best
* results, it is recommended that total swap be increased by the size
- * of the guest memory. Also, while technically not required to be
- * configured, it is highly recommended that frontswap also be configured
- * and enabled when selfballooning is running. So, selfballooning
- * is disabled by default if frontswap is not configured and can only
- * be enabled with the "selfballooning" kernel boot option; similarly
- * selfballooning is enabled by default if frontswap is configured and
- * can be disabled with the "noselfballooning" kernel boot option. Finally,
- * when frontswap is configured, frontswap-selfshrinking can be disabled
- * with the "noselfshrink" kernel boot option.
+ * of the guest memory. Note that selfballooning should be disabled by default
+ * if frontswap is not configured. Similarly selfballooning should be enabled
+ * by default if frontswap is configured and can be disabled with the
+ * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is
+ * configured, frontswap-selfshrinking can be disabled with the
+ * "tmem.selfshrink=0" kernel boot option.
*
* Selfballooning is disallowed in domain0 and force-disabled.
*
/* Enable/disable with sysfs. */
static bool frontswap_selfshrinking __read_mostly;
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-
/*
* The default values for the following parameters were deemed reasonable
* by experimentation, may be workload-dependent, and can all be
frontswap_shrink(tgt_frontswap_pages);
}
-static int __init xen_nofrontswap_selfshrink_setup(char *s)
-{
- use_frontswap_selfshrink = false;
- return 1;
-}
-
-__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
-
-/* Disable with kernel boot option. */
-static bool use_selfballooning = true;
-
-static int __init xen_noselfballooning_setup(char *s)
-{
- use_selfballooning = false;
- return 1;
-}
-
-__setup("noselfballooning", xen_noselfballooning_setup);
-#else /* !CONFIG_FRONTSWAP */
-/* Enable with kernel boot option. */
-static bool use_selfballooning;
-
-static int __init xen_selfballooning_setup(char *s)
-{
- use_selfballooning = true;
- return 1;
-}
-
-__setup("selfballooning", xen_selfballooning_setup);
#endif /* CONFIG_FRONTSWAP */
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
if (err)
- goto out_err;
+ goto out_err_free_ballooned_pages;
spin_lock(&xenbus_valloc_lock);
list_add(&node->next, &xenbus_valloc_pages);
*vaddr = addr;
return 0;
- out_err:
+ out_err_free_ballooned_pages:
free_xenballooned_pages(1, &node->page);
+ out_err:
kfree(node);
return err;
}
int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
+extern enum xenstore_init xen_store_domain_type;
extern const struct file_operations xen_xenbus_fops;
return err;
}
-static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
+ unsigned long data)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
- case IOCTL_XENBUS_BACKEND_EVTCHN:
- if (xen_store_evtchn > 0)
- return xen_store_evtchn;
- return -ENODEV;
-
- case IOCTL_XENBUS_BACKEND_SETUP:
- return xenbus_alloc(data);
-
- default:
- return -ENOTTY;
+ case IOCTL_XENBUS_BACKEND_EVTCHN:
+ if (xen_store_evtchn > 0)
+ return xen_store_evtchn;
+ return -ENODEV;
+ case IOCTL_XENBUS_BACKEND_SETUP:
+ return xenbus_alloc(data);
+ default:
+ return -ENOTTY;
}
}
struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);
+enum xenstore_init xen_store_domain_type;
+EXPORT_SYMBOL_GPL(xen_store_domain_type);
+
static unsigned long xen_store_mfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
return err;
}
-enum xenstore_init {
- UNKNOWN,
- PV,
- HVM,
- LOCAL,
-};
static int __init xenbus_init(void)
{
int err = 0;
- enum xenstore_init usage = UNKNOWN;
uint64_t v = 0;
+ xen_store_domain_type = XS_UNKNOWN;
if (!xen_domain())
return -ENODEV;
xenbus_ring_ops_init();
if (xen_pv_domain())
- usage = PV;
+ xen_store_domain_type = XS_PV;
if (xen_hvm_domain())
- usage = HVM;
+ xen_store_domain_type = XS_HVM;
if (xen_hvm_domain() && xen_initial_domain())
- usage = LOCAL;
+ xen_store_domain_type = XS_LOCAL;
if (xen_pv_domain() && !xen_start_info->store_evtchn)
- usage = LOCAL;
+ xen_store_domain_type = XS_LOCAL;
if (xen_pv_domain() && xen_start_info->store_evtchn)
xenstored_ready = 1;
- switch (usage) {
- case LOCAL:
+ switch (xen_store_domain_type) {
+ case XS_LOCAL:
err = xenstored_local_init();
if (err)
goto out_error;
xen_store_interface = mfn_to_virt(xen_store_mfn);
break;
- case PV:
+ case XS_PV:
xen_store_evtchn = xen_start_info->store_evtchn;
xen_store_mfn = xen_start_info->store_mfn;
xen_store_interface = mfn_to_virt(xen_store_mfn);
break;
- case HVM:
+ case XS_HVM:
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
if (err)
goto out_error;
struct bus_type bus;
};
+enum xenstore_init {
+ XS_UNKNOWN,
+ XS_PV,
+ XS_HVM,
+ XS_LOCAL,
+};
+
extern struct device_attribute xenbus_dev_attrs[];
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
#include "xenbus_probe.h"
+static struct workqueue_struct *xenbus_frontend_wq;
+
/* device/<type>/<id> => <type>-<id> */
static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
xenbus_otherend_changed(watch, vec, len, 1);
}
+static void xenbus_frontend_delayed_resume(struct work_struct *w)
+{
+ struct xenbus_device *xdev = container_of(w, struct xenbus_device, work);
+
+ xenbus_dev_resume(&xdev->dev);
+}
+
+static int xenbus_frontend_dev_resume(struct device *dev)
+{
+ /*
+ * If xenstored is running in this domain, we cannot access the backend
+ * state at the moment, so we need to defer xenbus_dev_resume
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct xenbus_device *xdev = to_xenbus_device(dev);
+
+ if (!xenbus_frontend_wq) {
+ pr_err("%s: no workqueue to process delayed resume\n",
+ xdev->nodename);
+ return -EFAULT;
+ }
+
+ INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
+ queue_work(xenbus_frontend_wq, &xdev->work);
+
+ return 0;
+ }
+
+ return xenbus_dev_resume(dev);
+}
+
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
- .resume = xenbus_dev_resume,
+ .resume = xenbus_frontend_dev_resume,
.freeze = xenbus_dev_suspend,
.thaw = xenbus_dev_cancel,
.restore = xenbus_dev_resume,
register_xenstore_notifier(&xenstore_notifier);
+ xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+
return 0;
}
subsys_initcall(xenbus_probe_frontend_init);
kunmap_atomic(ring);
while (atomic_read(&ctx->reqs_active) > 0) {
- wait_event(ctx->wait, head != ctx->tail);
+ wait_event(ctx->wait,
+ head != ctx->tail ||
+ atomic_read(&ctx->reqs_active) <= 0);
avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
* < min_nr if the timeout specified by timeout has elapsed
* before sufficient events are available, where timeout == NULL
* specifies an infinite timeout. Note that the timeout pointed to by
- * timeout is relative and will be updated if not NULL and the
- * operation blocks. Will fail with -ENOSYS if not implemented.
+ * timeout is relative. Will fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
result = filldir(dirent, keybuf, keysize, filp->f_pos,
(ino_t) value, d_type);
}
-
- filp->f_pos++;
+ if (!result)
+ filp->f_pos++;
befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
ref->parent, bsz, 0);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
ret = find_extent_in_eb(eb, bytenr,
*extent_item_pos, &eie);
unsigned int j;
DECLARE_COMPLETION_ONSTACK(complete);
- bio = bio_alloc(GFP_NOFS, num_pages - i);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) {
printk(KERN_INFO
"btrfsic: bio_alloc() for %u pages failed!\n",
BUG_ON(ret); /* -ENOMEM */
}
if (new_flags != 0) {
+ int level = btrfs_header_level(buf);
+
ret = btrfs_set_disk_extent_flags(trans, root,
buf->start,
buf->len,
- new_flags, 0);
+ new_flags, level, 0);
if (ret)
return ret;
}
/* holds checksums of all the data extents */
#define BTRFS_CSUM_TREE_OBJECTID 7ULL
-/* for storing balance parameters in the root tree */
-#define BTRFS_BALANCE_OBJECTID -4ULL
-
/* holds quota configuration and tracking */
#define BTRFS_QUOTA_TREE_OBJECTID 8ULL
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 flags,
- int is_data);
+ int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
struct btrfs_delayed_extent_op {
struct btrfs_disk_key key;
u64 flags_to_set;
+ int level;
unsigned int update_key:1;
unsigned int update_flags:1;
unsigned int is_data:1;
struct btrfs_device *tgt_device = NULL;
struct btrfs_device *src_device = NULL;
+ if (btrfs_fs_incompat(fs_info, RAID56)) {
+ pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
+ return -EINVAL;
+ }
+
switch (args->start.cont_reading_from_srcdev_mode) {
case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
{ .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" },
{ .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" },
{ .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" },
- { .id = BTRFS_ORPHAN_OBJECTID, .name_stem = "orphan" },
+ { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" },
{ .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
{ .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
}
root->commit_root = btrfs_root_node(root);
- BUG_ON(!root->node); /* -ENOMEM */
out:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
{
free_extent_buffer(info->tree_root->node);
free_extent_buffer(info->tree_root->commit_root);
- free_extent_buffer(info->dev_root->node);
- free_extent_buffer(info->dev_root->commit_root);
- free_extent_buffer(info->extent_root->node);
- free_extent_buffer(info->extent_root->commit_root);
- free_extent_buffer(info->csum_root->node);
- free_extent_buffer(info->csum_root->commit_root);
- if (info->quota_root) {
- free_extent_buffer(info->quota_root->node);
- free_extent_buffer(info->quota_root->commit_root);
- }
-
info->tree_root->node = NULL;
info->tree_root->commit_root = NULL;
- info->dev_root->node = NULL;
- info->dev_root->commit_root = NULL;
- info->extent_root->node = NULL;
- info->extent_root->commit_root = NULL;
- info->csum_root->node = NULL;
- info->csum_root->commit_root = NULL;
+
+ if (info->dev_root) {
+ free_extent_buffer(info->dev_root->node);
+ free_extent_buffer(info->dev_root->commit_root);
+ info->dev_root->node = NULL;
+ info->dev_root->commit_root = NULL;
+ }
+ if (info->extent_root) {
+ free_extent_buffer(info->extent_root->node);
+ free_extent_buffer(info->extent_root->commit_root);
+ info->extent_root->node = NULL;
+ info->extent_root->commit_root = NULL;
+ }
+ if (info->csum_root) {
+ free_extent_buffer(info->csum_root->node);
+ free_extent_buffer(info->csum_root->commit_root);
+ info->csum_root->node = NULL;
+ info->csum_root->commit_root = NULL;
+ }
if (info->quota_root) {
+ free_extent_buffer(info->quota_root->node);
+ free_extent_buffer(info->quota_root->commit_root);
info->quota_root->node = NULL;
info->quota_root->commit_root = NULL;
}
-
if (chunk_root) {
free_extent_buffer(info->chunk_root->node);
free_extent_buffer(info->chunk_root->commit_root);
* caller
*/
device->flush_bio = NULL;
- bio = bio_alloc(GFP_NOFS, 0);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
if (!bio)
return -ENOMEM;
ordered_operations);
list_del_init(&btrfs_inode->ordered_operations);
+ spin_unlock(&root->fs_info->ordered_extent_lock);
btrfs_invalidate_inodes(btrfs_inode->root);
+
+ spin_lock(&root->fs_info->ordered_extent_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
list_del_init(&btrfs_inode->delalloc_inodes);
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&btrfs_inode->runtime_flags);
+ spin_unlock(&root->fs_info->delalloc_lock);
btrfs_invalidate_inodes(btrfs_inode->root);
+
+ spin_lock(&root->fs_info->delalloc_lock);
}
spin_unlock(&root->fs_info->delalloc_lock);
while (start <= end) {
eb = btrfs_find_tree_block(root, start,
root->leafsize);
- start += eb->len;
+ start += root->leafsize;
if (!eb)
continue;
wait_on_extent_buffer_writeback(eb);
u32 item_size;
int ret;
int err = 0;
- int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
- node->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ int metadata = !extent_op->is_data;
if (trans->aborted)
return 0;
key.objectid = node->bytenr;
if (metadata) {
- struct btrfs_delayed_tree_ref *tree_ref;
-
- tree_ref = btrfs_delayed_node_to_tree_ref(node);
key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = tree_ref->level;
+ key.offset = extent_op->level;
} else {
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = node->num_bytes;
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 flags,
- int is_data)
+ int level, int is_data)
{
struct btrfs_delayed_extent_op *extent_op;
int ret;
extent_op->update_flags = 1;
extent_op->update_key = 0;
extent_op->is_data = is_data ? 1 : 0;
+ extent_op->level = level;
ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
num_bytes, extent_op);
WARN_ON(ret);
if (i_size_read(inode) > 0) {
+ ret = btrfs_check_trunc_cache_free_space(root,
+ &root->fs_info->global_block_rsv);
+ if (ret)
+ goto out_put;
+
ret = btrfs_truncate_free_space_cache(root, trans, path,
inode);
if (ret)
fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
+ if (fs_info->quota_root)
+ fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
update_global_block_rsv(fs_info);
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
int ret;
+ bool global_updated = false;
block_rsv = get_block_rsv(trans, root);
- if (block_rsv->size == 0) {
- ret = reserve_metadata_bytes(root, block_rsv, blocksize,
- BTRFS_RESERVE_NO_FLUSH);
- /*
- * If we couldn't reserve metadata bytes try and use some from
- * the global reserve.
- */
- if (ret && block_rsv != global_rsv) {
- ret = block_rsv_use_bytes(global_rsv, blocksize);
- if (!ret)
- return global_rsv;
- return ERR_PTR(ret);
- } else if (ret) {
- return ERR_PTR(ret);
- }
+ if (unlikely(block_rsv->size == 0))
+ goto try_reserve;
+again:
+ ret = block_rsv_use_bytes(block_rsv, blocksize);
+ if (!ret)
return block_rsv;
+
+ if (block_rsv->failfast)
+ return ERR_PTR(ret);
+
+ if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
+ global_updated = true;
+ update_global_block_rsv(root->fs_info);
+ goto again;
}
- ret = block_rsv_use_bytes(block_rsv, blocksize);
+ if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ /*DEFAULT_RATELIMIT_BURST*/ 1);
+ if (__ratelimit(&_rs))
+ WARN(1, KERN_DEBUG
+ "btrfs: block rsv returned %d\n", ret);
+ }
+try_reserve:
+ ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+ BTRFS_RESERVE_NO_FLUSH);
if (!ret)
return block_rsv;
- if (ret && !block_rsv->failfast) {
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
- static DEFINE_RATELIMIT_STATE(_rs,
- DEFAULT_RATELIMIT_INTERVAL * 10,
- /*DEFAULT_RATELIMIT_BURST*/ 1);
- if (__ratelimit(&_rs))
- WARN(1, KERN_DEBUG
- "btrfs: block rsv returned %d\n", ret);
- }
- ret = reserve_metadata_bytes(root, block_rsv, blocksize,
- BTRFS_RESERVE_NO_FLUSH);
- if (!ret) {
- return block_rsv;
- } else if (ret && block_rsv != global_rsv) {
- ret = block_rsv_use_bytes(global_rsv, blocksize);
- if (!ret)
- return global_rsv;
- }
+ /*
+ * If we couldn't reserve metadata bytes try and use some from
+ * the global reserve if its space type is the same as the global
+ * reservation.
+ */
+ if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
+ block_rsv->space_info == global_rsv->space_info) {
+ ret = block_rsv_use_bytes(global_rsv, blocksize);
+ if (!ret)
+ return global_rsv;
}
-
- return ERR_PTR(-ENOSPC);
+ return ERR_PTR(ret);
}
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
extent_op->update_key = 1;
extent_op->update_flags = 1;
extent_op->is_data = 0;
+ extent_op->level = level;
ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
ins.objectid,
ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
- eb->len, flag, 0);
+ eb->len, flag,
+ btrfs_header_level(eb), 0);
BUG_ON(ret); /* -ENOMEM */
wc->flags[level] |= flag;
}
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
+static struct bio_set *btrfs_bioset;
#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
goto free_state_cache;
+
+ btrfs_bioset = bioset_create(BIO_POOL_SIZE,
+ offsetof(struct btrfs_io_bio, bio));
+ if (!btrfs_bioset)
+ goto free_buffer_cache;
return 0;
+free_buffer_cache:
+ kmem_cache_destroy(extent_buffer_cache);
+ extent_buffer_cache = NULL;
+
free_state_cache:
kmem_cache_destroy(extent_state_cache);
+ extent_state_cache = NULL;
return -ENOMEM;
}
kmem_cache_destroy(extent_state_cache);
if (extent_buffer_cache)
kmem_cache_destroy(extent_buffer_cache);
+ if (btrfs_bioset)
+ bioset_free(btrfs_bioset);
}
void extent_io_tree_init(struct extent_io_tree *tree,
SetPageUptodate(page);
}
-/*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
- u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
- if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
- unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
- struct page *page)
-{
- end_page_writeback(page);
-}
-
/*
* When IO fails, either with EIO or csum verification fails, we
* try other mirrors that might have a good copy of the data. This
if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
return 0;
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
bio->bi_private = &compl;
return -EIO;
}
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio) {
free_io_failure(inode, failrec, 0);
return -EIO;
struct extent_io_tree *tree;
u64 start;
u64 end;
- int whole_page;
do {
struct page *page = bvec->bv_page;
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
+ /* We always issue full-page writes, but if some block
+ * in a page fails to write, blk_update_request() will
+ * advance bv_offset and adjust bv_len to compensate.
+ * Print a warning for nonzero offsets, and an error
+ * if they don't add up to a full page. */
+ if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+ printk("%s page write in btrfs with offset %u and length %u\n",
+ bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+ ? KERN_ERR "partial" : KERN_INFO "incomplete",
+ bvec->bv_offset, bvec->bv_len);
- if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
- whole_page = 1;
- else
- whole_page = 0;
+ start = page_offset(page);
+ end = start + bvec->bv_offset + bvec->bv_len - 1;
if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags);
if (end_extent_writepage(page, err, start, end))
continue;
- if (whole_page)
- end_page_writeback(page);
- else
- check_page_writeback(tree, page);
+ end_page_writeback(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
struct extent_io_tree *tree;
u64 start;
u64 end;
- int whole_page;
int mirror;
int ret;
struct page *page = bvec->bv_page;
struct extent_state *cached = NULL;
struct extent_state *state;
+ struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
- "mirror=%ld\n", (u64)bio->bi_sector, err,
- (long int)bio->bi_bdev);
+ "mirror=%lu\n", (u64)bio->bi_sector, err,
+ io_bio->mirror_num);
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
+ /* We always issue full-page reads, but if some block
+ * in a page fails to read, blk_update_request() will
+ * advance bv_offset and adjust bv_len to compensate.
+ * Print a warning for nonzero offsets, and an error
+ * if they don't add up to a full page. */
+ if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+ printk("%s page read in btrfs with offset %u and length %u\n",
+ bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+ ? KERN_ERR "partial" : KERN_INFO "incomplete",
+ bvec->bv_offset, bvec->bv_len);
- if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
- whole_page = 1;
- else
- whole_page = 0;
+ start = page_offset(page);
+ end = start + bvec->bv_offset + bvec->bv_len - 1;
if (++bvec <= bvec_end)
prefetchw(&bvec->bv_page->flags);
}
spin_unlock(&tree->lock);
- mirror = (int)(unsigned long)bio->bi_bdev;
+ mirror = io_bio->mirror_num;
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
state, mirror);
}
unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
- if (whole_page) {
- if (uptodate) {
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
+ if (uptodate) {
+ SetPageUptodate(page);
} else {
- if (uptodate) {
- check_page_uptodate(tree, page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- check_page_locked(tree, page);
+ ClearPageUptodate(page);
+ SetPageError(page);
}
+ unlock_page(page);
} while (bvec <= bvec_end);
bio_put(bio);
}
+/*
+ * this allocates from the btrfs_bioset. We're returning a bio right now
+ * but you can call btrfs_io_bio for the appropriate container_of magic
+ */
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
gfp_t gfp_flags)
{
struct bio *bio;
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
if (bio == NULL && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
+ while (!bio && (nr_vecs /= 2)) {
+ bio = bio_alloc_bioset(gfp_flags,
+ nr_vecs, btrfs_bioset);
+ }
}
if (bio) {
return bio;
}
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+ return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
+}
+
+
+/* this also allocates from the btrfs_bioset */
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
+}
+
+
static int __must_check submit_one_bio(int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
last_for_get_extent = isize;
}
- lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
&cached_state);
em = get_extent_skip_holes(inode, start, last_for_get_extent,
out_free:
free_extent_map(em);
out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
&cached_state, GFP_NOFS);
return ret;
}
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
gfp_t gfp_flags);
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
struct btrfs_fs_info;
block_group->key.objectid);
}
-int btrfs_truncate_free_space_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct inode *inode)
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv)
{
- struct btrfs_block_rsv *rsv;
u64 needed_bytes;
- loff_t oldsize;
- int ret = 0;
-
- rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->global_block_rsv;
+ int ret;
/* 1 for slack space, 1 for updating the inode */
needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
btrfs_calc_trans_metadata_size(root, 1);
- spin_lock(&trans->block_rsv->lock);
- if (trans->block_rsv->reserved < needed_bytes) {
- spin_unlock(&trans->block_rsv->lock);
- trans->block_rsv = rsv;
- return -ENOSPC;
- }
- spin_unlock(&trans->block_rsv->lock);
+ spin_lock(&rsv->lock);
+ if (rsv->reserved < needed_bytes)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&rsv->lock);
+ return ret;
+}
+
+int btrfs_truncate_free_space_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct inode *inode)
+{
+ loff_t oldsize;
+ int ret = 0;
oldsize = i_size_read(inode);
btrfs_i_size_write(inode, 0);
*/
ret = btrfs_truncate_inode_items(trans, root, inode,
0, BTRFS_EXTENT_DATA_KEY);
-
if (ret) {
- trans->block_rsv = rsv;
btrfs_abort_transaction(trans, root, ret);
return ret;
}
ret = btrfs_update_inode(trans, root, inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
- trans->block_rsv = rsv;
return ret;
}
/* Make sure we can fit our crcs into the first page */
if (io_ctl.check_crcs &&
- (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
- WARN_ON(1);
+ (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
goto out_nospc;
- }
io_ctl_set_generation(&io_ctl, trans->transid);
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_path *path,
num_bytes = trans->bytes_reserved;
/*
* 1 item for inode item insertion if need
- * 3 items for inode item update (in the worst case)
+ * 4 items for inode item update (in the worst case)
+ * 1 item for slack space if we need to do truncation
* 1 item for free space object
* 3 items for pre-allocation
*/
- trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+ trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
trans->bytes_reserved,
BTRFS_RESERVE_NO_FLUSH);
if (i_size_read(inode) > 0) {
ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ if (ret != -ENOSPC)
+ btrfs_abort_transaction(trans, root, ret);
goto out_put;
}
}
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
- if (!em)
+ if (!em) {
+ ret = -ENOMEM;
goto out_free_reserve;
+ }
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
}
em = alloc_extent_map();
- if (!em)
+ if (!em) {
+ ret = -ENOMEM;
goto out_reserve;
+ }
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
no_delete:
+ btrfs_remove_delayed_node(inode);
clear_inode(inode);
return;
}
struct rb_node **p;
struct rb_node *parent;
u64 ino = btrfs_ino(inode);
-again:
- p = &root->inode_tree.rb_node;
- parent = NULL;
if (inode_unhashed(inode))
return;
-
+again:
+ parent = NULL;
spin_lock(&root->inode_lock);
+ p = &root->inode_tree.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
/* IO errors */
int errors;
+ /* orig_bio is our btrfs_io_bio */
struct bio *orig_bio;
+
+ /* dio_bio came from fs/direct-io.c */
+ struct bio *dio_bio;
};
static void btrfs_endio_direct_read(struct bio *bio, int err)
struct bio_vec *bvec = bio->bi_io_vec;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct bio *dio_bio;
u64 start;
start = dip->logical_offset;
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
dip->logical_offset + dip->bytes - 1);
- bio->bi_private = dip->private;
+ dio_bio = dip->dio_bio;
kfree(dip);
/* If we had a csum failure make sure to clear the uptodate flag */
if (err)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- dio_end_io(bio, err);
+ clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+ dio_end_io(dio_bio, err);
+ bio_put(bio);
}
static void btrfs_endio_direct_write(struct bio *bio, int err)
struct btrfs_ordered_extent *ordered = NULL;
u64 ordered_offset = dip->logical_offset;
u64 ordered_bytes = dip->bytes;
+ struct bio *dio_bio;
int ret;
if (err)
goto again;
}
out_done:
- bio->bi_private = dip->private;
+ dio_bio = dip->dio_bio;
kfree(dip);
/* If we had an error make sure to clear the uptodate flag */
if (err)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- dio_end_io(bio, err);
+ clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+ dio_end_io(dio_bio, err);
+ bio_put(bio);
}
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
if (!atomic_dec_and_test(&dip->pending_bios))
goto out;
- if (dip->errors)
+ if (dip->errors) {
bio_io_error(dip->orig_bio);
- else {
- set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
+ } else {
+ set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
bio_endio(dip->orig_bio, 0);
}
out:
return 0;
}
-static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
- loff_t file_offset)
+static void btrfs_submit_direct(int rw, struct bio *dio_bio,
+ struct inode *inode, loff_t file_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_private *dip;
- struct bio_vec *bvec = bio->bi_io_vec;
+ struct bio_vec *bvec = dio_bio->bi_io_vec;
+ struct bio *io_bio;
int skip_sum;
int write = rw & REQ_WRITE;
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+ io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
+
+ if (!io_bio) {
+ ret = -ENOMEM;
+ goto free_ordered;
+ }
+
dip = kmalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
- goto free_ordered;
+ goto free_io_bio;
}
- dip->private = bio->bi_private;
+ dip->private = dio_bio->bi_private;
+ io_bio->bi_private = dio_bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;
do {
dip->bytes += bvec->bv_len;
bvec++;
- } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
+ } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1));
- dip->disk_bytenr = (u64)bio->bi_sector << 9;
- bio->bi_private = dip;
+ dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+ io_bio->bi_private = dip;
dip->errors = 0;
- dip->orig_bio = bio;
+ dip->orig_bio = io_bio;
+ dip->dio_bio = dio_bio;
atomic_set(&dip->pending_bios, 0);
if (write)
- bio->bi_end_io = btrfs_endio_direct_write;
+ io_bio->bi_end_io = btrfs_endio_direct_write;
else
- bio->bi_end_io = btrfs_endio_direct_read;
+ io_bio->bi_end_io = btrfs_endio_direct_read;
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
if (!ret)
return;
+
+free_io_bio:
+ bio_put(io_bio);
+
free_ordered:
/*
* If this is a write, we need to clean up the reserved space and kill
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
- bio_endio(bio, ret);
+ bio_endio(dio_bio, ret);
}
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
- btrfs_remove_delayed_node(inode);
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
item_off = btrfs_item_ptr_offset(leaf, i);
item_len = btrfs_item_size_nr(leaf, i);
- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ btrfs_item_key_to_cpu(leaf, key, i);
+ if (!key_in_sk(key, sk))
+ continue;
+
+ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
item_len = 0;
if (sizeof(sh) + item_len + *sk_offset >
goto overflow;
}
- btrfs_item_key_to_cpu(leaf, key, i);
- if (!key_in_sk(key, sk))
- continue;
-
sh.objectid = key->objectid;
sh.offset = key->offset;
sh.type = key->type;
}
/* put a new bio on the list */
- bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
if (!bio)
return -ENOMEM;
if (!eb || !extent_buffer_uptodate(eb)) {
ret = (!eb) ? -ENOMEM : -EIO;
free_extent_buffer(eb);
- return ret;
+ break;
}
btrfs_tree_lock(eb);
if (cow) {
}
truncate:
+ ret = btrfs_check_trunc_cache_free_space(root,
+ &fs_info->global_block_rsv);
+ if (ret)
+ goto out;
+
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
}
WARN_ON(!page->page);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio) {
page->io_error = 1;
sblock->no_io_error_seen = 0;
return -EIO;
}
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
sbio->dev = wr_ctx->tgtdev;
bio = sbio->bio;
if (!bio) {
- bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
if (!bio) {
mutex_unlock(&wr_ctx->wr_lock);
return -ENOMEM;
sbio->dev = spage->dev;
bio = sbio->bio;
if (!bio) {
- bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
if (!bio)
return -ENOMEM;
sbio->bio = bio;
"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
return -EIO;
}
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio) {
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
btrfs_dev_replace_suspend_for_unmount(fs_info);
btrfs_scrub_cancel(fs_info);
+ btrfs_pause_balance(fs_info);
ret = btrfs_commit_super(root);
if (ret)
allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
if (num_devices == 1)
allowed |= BTRFS_BLOCK_GROUP_DUP;
- else if (num_devices < 4)
+ else if (num_devices > 1)
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
- else
- allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_RAID5 |
- BTRFS_BLOCK_GROUP_RAID6);
-
+ if (num_devices > 2)
+ allowed |= BTRFS_BLOCK_GROUP_RAID5;
+ if (num_devices > 3)
+ allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID6);
if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(!alloc_profile_is_valid(bctl->data.target, 1) ||
(bctl->data.target & ~allowed))) {
return 0;
}
-static void *merge_stripe_index_into_bio_private(void *bi_private,
- unsigned int stripe_index)
-{
- /*
- * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
- * at most 1.
- * The alternative solution (instead of stealing bits from the
- * pointer) would be to allocate an intermediate structure
- * that contains the old private pointer plus the stripe_index.
- */
- BUG_ON((((uintptr_t)bi_private) & 3) != 0);
- BUG_ON(stripe_index > 3);
- return (void *)(((uintptr_t)bi_private) | stripe_index);
-}
-
-static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
-{
- return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
-}
-
-static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
-{
- return (unsigned int)((uintptr_t)bi_private) & 3;
-}
-
static void btrfs_end_bio(struct bio *bio, int err)
{
- struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
+ struct btrfs_bio *bbio = bio->bi_private;
int is_orig_bio = 0;
if (err) {
atomic_inc(&bbio->error);
if (err == -EIO || err == -EREMOTEIO) {
unsigned int stripe_index =
- extract_stripe_index_from_bio_private(
- bio->bi_private);
+ btrfs_io_bio(bio)->stripe_index;
struct btrfs_device *dev;
BUG_ON(stripe_index >= bbio->num_stripes);
}
bio->bi_private = bbio->private;
bio->bi_end_io = bbio->end_io;
- bio->bi_bdev = (struct block_device *)
- (unsigned long)bbio->mirror_num;
+ btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
/* only send an error to the higher layers if it is
* beyond the tolerance of the btrfs bio
*/
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
bio->bi_private = bbio;
- bio->bi_private = merge_stripe_index_into_bio_private(
- bio->bi_private, (unsigned int)dev_nr);
+ btrfs_io_bio(bio)->stripe_index = dev_nr;
bio->bi_end_io = btrfs_end_bio;
bio->bi_sector = physical >> 9;
#ifdef DEBUG
if (atomic_dec_and_test(&bbio->stripes_pending)) {
bio->bi_private = bbio->private;
bio->bi_end_io = bbio->end_io;
- bio->bi_bdev = (struct block_device *)
- (unsigned long)bbio->mirror_num;
+ btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
bio->bi_sector = logical >> 9;
kfree(bbio);
bio_endio(bio, -EIO);
}
if (dev_nr < total_devs - 1) {
- bio = bio_clone(first_bio, GFP_NOFS);
+ bio = btrfs_bio_clone(first_bio, GFP_NOFS);
BUG_ON(!bio); /* -ENOMEM */
} else {
bio = first_bio;
int rotating;
};
+/*
+ * we need the mirror number and stripe index to be passed around
+ * the call chain while we are processing end_io (especially errors).
+ * Really, what we need is a btrfs_bio structure that has this info
+ * and is properly sized with its stripe array, but we're not there
+ * quite yet. We have our own btrfs bioset, and all of the bios
+ * we allocate are actually btrfs_io_bios. We'll cram as much of
+ * struct btrfs_bio as we can into this over time.
+ */
+struct btrfs_io_bio {
+ unsigned long mirror_num;
+ unsigned long stripe_index;
+ struct bio bio;
+};
+
+static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
+{
+ return container_of(bio, struct btrfs_io_bio, bio);
+}
+
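
The comment above describes the general private-bioset pattern: allocate every bio from a dedicated bio_set whose front padding is the wrapper structure, then recover the wrapper with container_of. A self-contained sketch of that pattern, using illustrative names rather than the btrfs ones:

    /* Hypothetical sketch of the private-bioset pattern described above. */
    #include <linux/bio.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    struct my_io_bio {
            unsigned long mirror_num;       /* per-bio bookkeeping */
            struct bio bio;                 /* must be the last member */
    };

    static struct bio_set *my_bioset;

    static int __init my_io_init(void)
    {
            /* front_pad is the wrapper space preceding the embedded bio */
            my_bioset = bioset_create(BIO_POOL_SIZE,
                                      offsetof(struct my_io_bio, bio));
            return my_bioset ? 0 : -ENOMEM;
    }

    static struct bio *my_bio_alloc(gfp_t gfp, unsigned int nr_vecs)
    {
            /* hand out the embedded bio; callers never see the wrapper */
            return bio_alloc_bioset(gfp, nr_vecs, my_bioset);
    }

    static inline struct my_io_bio *my_io_bio(struct bio *bio)
    {
            /* valid only for bios that came from my_bioset */
            return container_of(bio, struct my_io_bio, bio);
    }
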
struct btrfs_bio_stripe {
struct btrfs_device *dev;
u64 physical;
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/fs.h>
+#include <linux/inet.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifsfs.h"
}
/**
- * cifs_get_share_name - extracts share name from UNC
- * @node_name: pointer to UNC string
+ * cifs_build_devname - build a devicename from a UNC and optional prepath
+ * @nodename: pointer to UNC string
+ * @prepath: pointer to prefixpath (or NULL if there isn't one)
*
- * Extracts sharename form full UNC.
- * i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the beginning of DFS node refferal
- * if necessary.
- * Returns pointer to share name on success or ERR_PTR on error.
- * Caller is responsible for freeing returned string.
+ * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
+ * big enough to hold the final thing. Copy the UNC from the nodename, and
+ * concatenate the prepath onto the end of it if there is one.
+ *
+ * Returns pointer to the built string, or an ERR_PTR. Caller is responsible
+ * for freeing the returned string.
*/
-static char *cifs_get_share_name(const char *node_name)
+static char *
+cifs_build_devname(char *nodename, const char *prepath)
{
- int len;
- char *UNC;
- char *pSep;
-
- len = strlen(node_name);
- UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
- GFP_KERNEL);
- if (!UNC)
- return ERR_PTR(-ENOMEM);
+ size_t pplen;
+ size_t unclen;
+ char *dev;
+ char *pos;
+
+ /* skip over any preceding delimiters */
+ nodename += strspn(nodename, "\\");
+ if (!*nodename)
+ return ERR_PTR(-EINVAL);
- /* get share name and server name */
- if (node_name[1] != '\\') {
- UNC[0] = '\\';
- strncpy(UNC+1, node_name, len);
- len++;
- UNC[len] = 0;
- } else {
- strncpy(UNC, node_name, len);
- UNC[len] = 0;
- }
+ /* get length of UNC and set pos to last char */
+ unclen = strlen(nodename);
+ pos = nodename + unclen - 1;
- /* find server name end */
- pSep = memchr(UNC+2, '\\', len-2);
- if (!pSep) {
- cifs_dbg(VFS, "%s: no server name end in node name: %s\n",
- __func__, node_name);
- kfree(UNC);
- return ERR_PTR(-EINVAL);
+ /* trim off any trailing delimiters */
+ while (*pos == '\\') {
+ --pos;
+ --unclen;
}
- /* find sharename end */
- pSep++;
- pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC));
- if (pSep) {
- /* trim path up to sharename end
- * now we have share name in UNC */
- *pSep = 0;
+ /* allocate a buffer:
+ * +2 for preceding "//"
+ * +1 for delimiter between UNC and prepath
+ * +1 for trailing NULL
+ */
+ pplen = prepath ? strlen(prepath) : 0;
+ dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ pos = dev;
+ /* add the initial "//" */
+ *pos = '/';
+ ++pos;
+ *pos = '/';
+ ++pos;
+
+ /* copy in the UNC portion from referral */
+ memcpy(pos, nodename, unclen);
+ pos += unclen;
+
+ /* copy the prefixpath remainder (if there is one) */
+ if (pplen) {
+ *pos = '/';
+ ++pos;
+ memcpy(pos, prepath, pplen);
+ pos += pplen;
}
- return UNC;
+ /* NULL terminator */
+ *pos = '\0';
+
+ convert_delimiter(dev, '/');
+ return dev;
}
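
To make the construction concrete, a walk-through with purely illustrative inputs: given nodename "\\srv\share" and prepath "dir1\dir2", the leading backslashes are skipped, the buffer is assembled as "//" + "srv\share" + "/" + "dir1\dir2" plus the terminator, and the final convert_delimiter() pass yields "//srv/share/dir1/dir2".
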
{
int rc;
char *mountdata = NULL;
+ const char *prepath = NULL;
int md_len;
char *tkn_e;
char *srvIP = NULL;
if (sb_mountdata == NULL)
return ERR_PTR(-EINVAL);
- *devname = cifs_get_share_name(ref->node_name);
+ if (strlen(fullpath) - ref->path_consumed)
+ prepath = fullpath + ref->path_consumed;
+
+ *devname = cifs_build_devname(ref->node_name, prepath);
if (IS_ERR(*devname)) {
rc = PTR_ERR(*devname);
*devname = NULL;
goto compose_mount_options_err;
}
- /* md_len = strlen(...) + 12 for 'sep+prefixpath='
- * assuming that we have 'unc=' and 'ip=' in
- * the original sb_mountdata
+ /*
+ * In most cases, we'll be building a shorter string than the original,
+ * but we do have to assume that the address in the ip= option may be
+ * much longer than the original. Add the max length of an address
+ * string to the length of the original string to allow for worst case.
*/
- md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
- mountdata = kzalloc(md_len+1, GFP_KERNEL);
+ md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
+ mountdata = kzalloc(md_len + 1, GFP_KERNEL);
if (mountdata == NULL) {
rc = -ENOMEM;
goto compose_mount_options_err;
strncat(mountdata, &sep, 1);
strcat(mountdata, "ip=");
strcat(mountdata, srvIP);
- strncat(mountdata, &sep, 1);
- strcat(mountdata, "unc=");
- strcat(mountdata, *devname);
-
- /* find & copy prefixpath */
- tkn_e = strchr(ref->node_name + 2, '\\');
- if (tkn_e == NULL) {
- /* invalid unc, missing share name*/
- rc = -EINVAL;
- goto compose_mount_options_err;
- }
-
- tkn_e = strchr(tkn_e + 1, '\\');
- if (tkn_e || (strlen(fullpath) - ref->path_consumed)) {
- strncat(mountdata, &sep, 1);
- strcat(mountdata, "prefixpath=");
- if (tkn_e)
- strcat(mountdata, tkn_e + 1);
- strcat(mountdata, fullpath + ref->path_consumed);
- }
/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
cifs_show_security(s, tcon->ses->server);
cifs_show_cache_flavor(s, cifs_sb);
- seq_printf(s, ",unc=");
- seq_escape(s, tcon->treeName, " \t\n\\");
-
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
else if (tcon->ses->user_name)
#endif
case Opt_sec_none:
vol->nullauth = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLM;
break;
default:
cifs_dbg(VFS, "bad security option: %s\n", value);
vol->backupuid_specified = false; /* no backup intent for a user */
vol->backupgid_specified = false; /* no backup intent for a group */
- /*
- * For now, we ignore -EINVAL errors under the assumption that the
- * unc= and prefixpath= options will be usable.
- */
- if (cifs_parse_devname(devname, vol) == -ENOMEM) {
- printk(KERN_ERR "CIFS: Unable to allocate memory to parse "
- "device string.\n");
- goto out_nomem;
+ switch (cifs_parse_devname(devname, vol)) {
+ case 0:
+ break;
+ case -ENOMEM:
+ cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
+ goto cifs_parse_mount_err;
+ case -EINVAL:
+ cifs_dbg(VFS, "Malformed UNC in devname.\n");
+ goto cifs_parse_mount_err;
+ default:
+ cifs_dbg(VFS, "Unknown error parsing devname.\n");
+ goto cifs_parse_mount_err;
}
while ((data = strsep(&options, separator)) != NULL) {
}
#endif
if (!vol->UNC) {
- cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string or in unc= option!\n");
+ cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n");
goto cifs_parse_mount_err;
}
pos = full_path + unc_len;
if (pplen) {
- *pos++ = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos, vol->prepath, pplen);
+ *pos = CIFS_DIR_SEP(cifs_sb);
+ strncpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
/**
* dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
- * @unc: UNC path specifying the server
+ * @unc: UNC path specifying the server (with '/' as delimiter)
* @ip_addr: Where to return the IP address.
*
* The IP address will be returned in string form, and the caller is
hostname = unc + 2;
/* Search for server name delimiter */
- sep = memchr(hostname, '\\', len);
+ sep = memchr(hostname, '/', len);
if (sep)
len = sep - hostname;
else
if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
inode->i_flags |= S_AUTOMOUNT;
- cifs_set_ops(inode);
+ if (inode->i_state & I_NEW)
+ cifs_set_ops(inode);
}
void
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
+ int rc;
+
+ rc = filemap_write_and_wait(file->f_mapping);
+ if (rc)
+ return rc;
+
return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
bytes = efivar_entry_set_get_size(var, attributes, &datasize,
data, &set);
- if (!set && bytes)
+ if (!set && bytes) {
+ if (bytes == -ENOENT)
+ bytes = -EIO;
goto out;
+ }
if (bytes == -ENOENT) {
drop_nlink(inode);
int err;
err = efivar_entry_size(var, &datasize);
- if (err)
+
+ /*
+ * efivarfs represents uncommitted variables with
+ * zero-length files. Reading them should return EOF.
+ */
+ if (err == -ENOENT)
+ return 0;
+ else if (err)
return err;
data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
ssize_t size; /* size of the extent */
struct kiocb *iocb; /* iocb struct for AIO */
int result; /* error value for AIO */
- atomic_t count; /* reference counter */
} ext4_io_end_t;
struct ext4_io_submit {
/* page-io.c */
extern int __init ext4_init_pageio(void);
+extern void ext4_add_complete_io(ext4_io_end_t *io_end);
extern void ext4_exit_pageio(void);
extern void ext4_ioend_shutdown(struct inode *);
+extern void ext4_free_io_end(ext4_io_end_t *io);
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
-extern int ext4_put_io_end(ext4_io_end_t *io_end);
-extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
-extern void ext4_io_submit_init(struct ext4_io_submit *io,
- struct writeback_control *wbc);
extern void ext4_end_io_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
{
struct extent_status es;
- ext4_es_find_delayed_extent(inode, lblk_start, &es);
+ ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
if (es.es_len == 0)
return 0; /* there is no delay extent in this tree */
else if (es.es_lblk <= lblk_start &&
struct extent_status es;
ext4_lblk_t block, next_del;
- ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
-
if (newes->es_pblk == 0) {
+ ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
+ newes->es_lblk + newes->es_len - 1, &es);
+
/*
* No extent in extent-tree contains block @newes->es_pblk,
* then the block may stay in 1)a hole or 2)delayed-extent.
}
block = newes->es_lblk + newes->es_len;
- ext4_es_find_delayed_extent(inode, block, &es);
+ ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
}
/*
- * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk
- * if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
+ * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
*
* @inode: the inode which owns delayed extents
* @lblk: the offset where we start to search
+ * @end: the offset where we stop to search
* @es: delayed extent that we found
*/
-void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+void ext4_es_find_delayed_extent_range(struct inode *inode,
+ ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es)
{
struct ext4_es_tree *tree = NULL;
struct rb_node *node;
BUG_ON(es == NULL);
- trace_ext4_es_find_delayed_extent_enter(inode, lblk);
+ BUG_ON(end < lblk);
+ trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
read_lock(&EXT4_I(inode)->i_es_lock);
tree = &EXT4_I(inode)->i_es_tree;
if (es1 && !ext4_es_is_delayed(es1)) {
while ((node = rb_next(&es1->rb_node)) != NULL) {
es1 = rb_entry(node, struct extent_status, rb_node);
+ if (es1->es_lblk > end) {
+ es1 = NULL;
+ break;
+ }
if (ext4_es_is_delayed(es1))
break;
}
read_unlock(&EXT4_I(inode)->i_es_lock);
ext4_es_lru_add(inode);
- trace_ext4_es_find_delayed_extent_exit(inode, es);
+ trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
static struct extent_status *
unsigned long long status);
extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+extern void ext4_es_find_delayed_extent_range(struct inode *inode,
+ ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
* If there is a delay extent at this offset,
* it will be as a data.
*/
- ext4_es_find_delayed_extent(inode, last, &es);
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
if (last != start)
dataoff = last << blkbits;
* If there is a delay extent at this offset,
* we will skip this extent.
*/
- ext4_es_find_delayed_extent(inode, last, &es);
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
last = es.es_lblk + es.es_len;
holeoff = last << blkbits;
struct ext4_io_submit io_submit;
BUG_ON(mpd->next_page <= mpd->first_page);
- ext4_io_submit_init(&io_submit, mpd->wbc);
- io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_submit.io_end)
- return -ENOMEM;
+ memset(&io_submit, 0, sizeof(io_submit));
/*
* We need to start from the first_page to the next_page - 1
* to make sure we also write the mapped dirty buffer_heads.
pagevec_release(&pvec);
}
ext4_io_submit(&io_submit);
- /* Drop io_end reference we got from init */
- ext4_put_io_end_defer(io_submit.io_end);
return ret;
}
*/
return __ext4_journalled_writepage(page, len);
- ext4_io_submit_init(&io_submit, wbc);
- io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_submit.io_end) {
- redirty_page_for_writepage(wbc, page);
- return -ENOMEM;
- }
+ memset(&io_submit, 0, sizeof(io_submit));
ret = ext4_bio_write_page(&io_submit, page, len, wbc);
ext4_io_submit(&io_submit);
- /* Drop io_end reference we got from init */
- ext4_put_io_end_defer(io_submit.io_end);
return ret;
}
struct inode *inode = file_inode(iocb->ki_filp);
ext4_io_end_t *io_end = iocb->private;
- /* if not async direct IO just return */
- if (!io_end) {
- inode_dio_done(inode);
- if (is_async)
- aio_complete(iocb, ret, 0);
- return;
- }
+ /* if not async direct IO or dio with 0 bytes write, just return */
+ if (!io_end || !size)
+ goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
size);
iocb->private = NULL;
+
+ /* if not aio dio with unwritten extents, just free io and return */
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+out:
+ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+ return;
+ }
+
io_end->offset = offset;
io_end->size = size;
if (is_async) {
io_end->iocb = iocb;
io_end->result = ret;
}
- ext4_put_io_end_defer(io_end);
+
+ ext4_add_complete_io(io_end);
}
/*
get_block_t *get_block_func = NULL;
int dio_flags = 0;
loff_t final_size = offset + count;
- ext4_io_end_t *io_end = NULL;
/* Use the old path for reads and writes beyond i_size. */
if (rw != WRITE || final_size > inode->i_size)
iocb->private = NULL;
ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
- io_end = ext4_init_io_end(inode, GFP_NOFS);
+ ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end) {
ret = -ENOMEM;
goto retake_lock;
}
io_end->flag |= EXT4_IO_END_DIRECT;
- /*
- * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
- */
- iocb->private = ext4_get_io_end(io_end);
+ iocb->private = io_end;
/*
* we save the io structure for current async direct
* IO, so that later ext4_map_blocks() could flag the
NULL,
dio_flags);
+ if (iocb->private)
+ ext4_inode_aio_set(inode, NULL);
/*
- * Put our reference to io_end. This can free the io_end structure e.g.
- * in sync IO case or in case of error. It can even perform extent
- * conversion if all bios we submitted finished before we got here.
- * Note that in that case iocb->private can be already set to NULL
- * here.
+ * The io_end structure takes a reference to the inode; that
+ * structure needs to be destroyed and the reference to the
+ * inode needs to be dropped when IO is complete, even for a
+ * zero-byte write or a failure.
+ *
+ * In the successful AIO DIO case, the io_end structure will
+ * be destroyed and the reference to the inode will be dropped
+ * after the end_io callback function is called.
+ *
+ * In the zero-byte write or error case, since VFS
+ * direct IO won't invoke the end_io callback function, we
+ * need to free the end_io structure here.
*/
- if (io_end) {
- ext4_inode_aio_set(inode, NULL);
- ext4_put_io_end(io_end);
- /*
- * In case of error or no write ext4_end_io_dio() was not
- * called so we have to put iocb's reference.
- */
- if (ret <= 0 && ret != -EIOCBQUEUED) {
- WARN_ON(iocb->private != io_end);
- ext4_put_io_end(io_end);
- iocb->private = NULL;
- }
- }
- if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+ if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+ ext4_free_io_end(iocb->private);
+ iocb->private = NULL;
+ } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
group = ac->ac_g_ex.fe_group;
for (i = 0; i < ngroups; group++, i++) {
- if (group == ngroups)
+ /*
+ * Artificially restricted ngroups for non-extent
+ * files makes group > ngroups possible on first loop.
+ */
+ if (group >= ngroups)
group = 0;
/* This now checks without needing the buddy page */
cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
}
-static void ext4_release_io_end(ext4_io_end_t *io_end)
+void ext4_free_io_end(ext4_io_end_t *io)
{
- BUG_ON(!list_empty(&io_end->list));
- BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-
- if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
- wake_up_all(ext4_ioend_wq(io_end->inode));
- if (io_end->flag & EXT4_IO_END_DIRECT)
- inode_dio_done(io_end->inode);
- if (io_end->iocb)
- aio_complete(io_end->iocb, io_end->result, 0);
- kmem_cache_free(io_end_cachep, io_end);
-}
-
-static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
-{
- struct inode *inode = io_end->inode;
+ BUG_ON(!io);
+ BUG_ON(!list_empty(&io->list));
+ BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
- io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
- /* Wake up anyone waiting on unwritten extent conversion */
- if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
- wake_up_all(ext4_ioend_wq(inode));
+ if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
+ wake_up_all(ext4_ioend_wq(io->inode));
+ kmem_cache_free(io_end_cachep, io);
}
/* check a range of space and convert unwritten extents to written. */
"(inode %lu, offset %llu, size %zd, error %d)",
inode->i_ino, offset, size, ret);
}
- ext4_clear_io_unwritten_flag(io);
- ext4_release_io_end(io);
+ /* Wake up anyone waiting on unwritten extent conversion */
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+ wake_up_all(ext4_ioend_wq(inode));
+ if (io->flag & EXT4_IO_END_DIRECT)
+ inode_dio_done(inode);
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
return ret;
}
}
/* Add the io_end to per-inode completed end_io list. */
-static void ext4_add_complete_io(ext4_io_end_t *io_end)
+void ext4_add_complete_io(ext4_io_end_t *io_end)
{
struct ext4_inode_info *ei = EXT4_I(io_end->inode);
struct workqueue_struct *wq;
err = ext4_end_io(io);
if (unlikely(!ret && err))
ret = err;
+ io->flag &= ~EXT4_IO_END_UNWRITTEN;
+ ext4_free_io_end(io);
}
return ret;
}
atomic_inc(&EXT4_I(inode)->i_ioend_count);
io->inode = inode;
INIT_LIST_HEAD(&io->list);
- atomic_set(&io->count, 1);
}
return io;
}
-void ext4_put_io_end_defer(ext4_io_end_t *io_end)
-{
- if (atomic_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
- ext4_release_io_end(io_end);
- return;
- }
- ext4_add_complete_io(io_end);
- }
-}
-
-int ext4_put_io_end(ext4_io_end_t *io_end)
-{
- int err = 0;
-
- if (atomic_dec_and_test(&io_end->count)) {
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_extents(io_end->inode,
- io_end->offset, io_end->size);
- ext4_clear_io_unwritten_flag(io_end);
- }
- ext4_release_io_end(io_end);
- }
- return err;
-}
-
-ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
-{
- atomic_inc(&io_end->count);
- return io_end;
-}
-
/*
* Print an buffer I/O error compatible with the fs/buffer.c. This
* provides compatibility with dmesg scrapers that look for a specific
bi_sector >> (inode->i_blkbits - 9));
}
- ext4_put_io_end_defer(io_end);
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ return;
+ }
+
+ ext4_add_complete_io(io_end);
}
void ext4_io_submit(struct ext4_io_submit *io)
bio_put(io->io_bio);
}
io->io_bio = NULL;
-}
-
-void ext4_io_submit_init(struct ext4_io_submit *io,
- struct writeback_control *wbc)
-{
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
- io->io_bio = NULL;
+ io->io_op = 0;
io->io_end = NULL;
}
-static int io_submit_init_bio(struct ext4_io_submit *io,
- struct buffer_head *bh)
+static int io_submit_init(struct ext4_io_submit *io,
+ struct inode *inode,
+ struct writeback_control *wbc,
+ struct buffer_head *bh)
{
+ ext4_io_end_t *io_end;
+ struct page *page = bh->b_page;
int nvecs = bio_get_nr_vecs(bh->b_bdev);
struct bio *bio;
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end)
+ return -ENOMEM;
bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
+ bio->bi_private = io->io_end = io_end;
bio->bi_end_io = ext4_end_bio;
- bio->bi_private = ext4_get_io_end(io->io_end);
- if (!io->io_end->size)
- io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT)
- + bh_offset(bh);
+
+ io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
io->io_bio = bio;
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
+ struct writeback_control *wbc,
struct buffer_head *bh)
{
ext4_io_end_t *io_end;
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
- ret = io_submit_init_bio(io, bh);
+ ret = io_submit_init(io, inode, wbc, bh);
if (ret)
return ret;
}
- ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
- if (ret != bh->b_size)
- goto submit_and_retry;
io_end = io->io_end;
if (test_clear_buffer_uninit(bh))
ext4_set_io_unwritten_flag(inode, io_end);
- io_end->size += bh->b_size;
+ io->io_end->size += bh->b_size;
io->io_next_block++;
+ ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+ if (ret != bh->b_size)
+ goto submit_and_retry;
return 0;
}
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bh);
+ ret = io_submit_add_bh(io, inode, wbc, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
return 0;
}
+static unsigned long calc_fat_clusters(struct super_block *sb)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+ /* Divide first to avoid overflow */
+ if (sbi->fat_bits != 12) {
+ unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
+ return ent_per_sec * sbi->fat_length;
+ }
+
+ return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+}
+
/*
* Read the super block of an MS-DOS FS.
*/
sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
/* check that FAT table does not overflow */
- fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+ fat_clusters = calc_fat_clusters(sb);
total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
if (total_clusters > MAX_FAT(sb)) {
if (!silent)
static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
struct inode *inode;
+ struct dentry *parent;
+ struct fuse_conn *fc;
inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
else if (fuse_dentry_time(entry) < get_jiffies_64()) {
int err;
struct fuse_entry_out outarg;
- struct fuse_conn *fc;
struct fuse_req *req;
struct fuse_forget_link *forget;
- struct dentry *parent;
u64 attr_version;
/* For negative dentries, always do a fresh lookup */
entry_attr_timeout(&outarg),
attr_version);
fuse_change_entry_timeout(entry, &outarg);
+ } else if (inode) {
+ fc = get_fuse_conn(inode);
+ if (fc->readdirplus_auto) {
+ parent = dget_parent(entry);
+ fuse_advise_use_readdirplus(parent->d_inode);
+ dput(parent);
+ }
}
- fuse_advise_use_readdirplus(inode);
return 1;
}
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/aio.h>
+#include <linux/falloc.h>
static const struct file_operations fuse_direct_io_file_operations;
iov_iter_init(&ii, iov, nr_segs, count, 0);
- req = fuse_get_req(fc, fuse_iter_npages(&ii));
+ if (io->async)
+ req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+ else
+ req = fuse_get_req(fc, fuse_iter_npages(&ii));
if (IS_ERR(req))
return PTR_ERR(req);
break;
if (count) {
fuse_put_request(fc, req);
- req = fuse_get_req(fc, fuse_iter_npages(&ii));
+ if (io->async)
+ req = fuse_get_req_for_background(fc,
+ fuse_iter_npages(&ii));
+ else
+ req = fuse_get_req(fc, fuse_iter_npages(&ii));
if (IS_ERR(req))
break;
}
fuse_do_setattr(inode, &attr, file);
}
+static inline loff_t fuse_round_up(loff_t off)
+{
+ return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
+}
+
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t offset, unsigned long nr_segs)
ssize_t ret = 0;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
+ bool async_dio = ff->fc->async_dio;
loff_t pos = 0;
struct inode *inode;
loff_t i_size;
i_size = i_size_read(inode);
/* optimization for short read */
- if (rw != WRITE && offset + count > i_size) {
+ if (async_dio && rw != WRITE && offset + count > i_size) {
if (offset >= i_size)
return 0;
- count = i_size - offset;
+ count = min_t(loff_t, count, fuse_round_up(i_size - offset));
}
io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
* By default, we want to optimize all I/Os with async request
* submission to the client filesystem if supported.
*/
- io->async = ff->fc->async_dio;
+ io->async = async_dio;
io->iocb = iocb;
/*
* to wait on real async I/O requests, so we must submit this request
* synchronously.
*/
- if (!is_sync_kiocb(iocb) && (offset + count > i_size))
+ if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
io->async = false;
if (rw == WRITE)
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
- if (ret > 0 && !is_sync_kiocb(iocb))
+ if (!is_sync_kiocb(iocb))
return -EIOCBQUEUED;
ret = wait_on_sync_kiocb(iocb);
loff_t length)
{
struct fuse_file *ff = file->private_data;
+ struct inode *inode = file->f_inode;
struct fuse_conn *fc = ff->fc;
struct fuse_req *req;
struct fuse_fallocate_in inarg = {
if (fc->no_fallocate)
return -EOPNOTSUPP;
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ mutex_lock(&inode->i_mutex);
+ fuse_set_nowrite(inode);
+ }
+
req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
req->in.h.opcode = FUSE_FALLOCATE;
req->in.h.nodeid = ff->nodeid;
}
fuse_put_request(fc, req);
+ if (err)
+ goto out;
+
+ /* we could have extended the file */
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ fuse_write_update_size(inode, offset + length);
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ truncate_pagecache_range(inode, offset, offset + length - 1);
+
+ fuse_invalidate_attr(inode);
+
+out:
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ fuse_release_nowrite(inode);
+ mutex_unlock(&inode->i_mutex);
+ }
+
return err;
}
fc->dont_mask = 1;
if (arg->flags & FUSE_AUTO_INVAL_DATA)
fc->auto_inval_data = 1;
- if (arg->flags & FUSE_DO_READDIRPLUS)
+ if (arg->flags & FUSE_DO_READDIRPLUS) {
fc->do_readdirplus = 1;
- if (arg->flags & FUSE_READDIRPLUS_AUTO)
- fc->readdirplus_auto = 1;
+ if (arg->flags & FUSE_READDIRPLUS_AUTO)
+ fc->readdirplus_auto = 1;
+ }
if (arg->flags & FUSE_ASYNC_DIO)
fc->async_dio = 1;
} else {
config GFS2_FS_LOCKING_DLM
bool "GFS2 DLM locking"
depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
- HOTPLUG && DLM && CONFIGFS_FS && SYSFS
+ HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS)
help
Multiple node locking module for GFS2
if (ret)
return ret;
+ ret = get_write_access(inode);
+ if (ret)
+ return ret;
+
inode_dio_wait(inode);
ret = gfs2_rs_alloc(GFS2_I(inode));
if (ret)
- return ret;
+ goto out;
oldsize = inode->i_size;
- if (newsize >= oldsize)
- return do_grow(inode, newsize);
+ if (newsize >= oldsize) {
+ ret = do_grow(inode, newsize);
+ goto out;
+ }
- return do_shrink(inode, oldsize, newsize);
+ ret = do_shrink(inode, oldsize, newsize);
+out:
+ put_write_access(inode);
+ return ret;
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
return ERR_PTR(-EIO);
}
- hc = kmalloc(hsize, GFP_NOFS);
- ret = -ENOMEM;
+ hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
+ if (hc == NULL)
+ hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
+
if (hc == NULL)
return ERR_PTR(-ENOMEM);
ret = gfs2_dir_read_data(ip, hc, hsize);
if (ret < 0) {
- kfree(hc);
+ if (is_vmalloc_addr(hc))
+ vfree(hc);
+ else
+ kfree(hc);
return ERR_PTR(ret);
}
spin_lock(&inode->i_lock);
- if (ip->i_hash_cache)
- kfree(hc);
- else
+ if (ip->i_hash_cache) {
+ if (is_vmalloc_addr(hc))
+ vfree(hc);
+ else
+ kfree(hc);
+ } else {
ip->i_hash_cache = hc;
+ }
spin_unlock(&inode->i_lock);
return ip->i_hash_cache;
{
__be64 *hc = ip->i_hash_cache;
ip->i_hash_cache = NULL;
- kfree(hc);
+ if (is_vmalloc_addr(hc))
+ vfree(hc);
+ else
+ kfree(hc);
}
static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
if (IS_ERR(hc))
return PTR_ERR(hc);
- h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS);
+ h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+ if (hc2 == NULL)
+ hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
+
if (!hc2)
return -ENOMEM;
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
out_kfree:
- kfree(hc2);
+ if (is_vmalloc_addr(hc2))
+ vfree(hc2);
+ else
+ kfree(hc2);
return error;
}
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
ht = kzalloc(size, GFP_NOFS);
+ if (ht == NULL)
+ ht = vzalloc(size);
if (!ht)
return -ENOMEM;
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
out:
- kfree(ht);
+ if (is_vmalloc_addr(ht))
+ vfree(ht);
+ else
+ kfree(ht);
return error;
}
/* Update file times before taking page lock */
file_update_time(vma->vm_file);
+ ret = get_write_access(inode);
+ if (ret)
+ goto out;
+
ret = gfs2_rs_alloc(ip);
if (ret)
- return ret;
+ goto out_write_access;
gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret)
- goto out;
+ goto out_uninit;
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
gfs2_quota_unlock(ip);
out_unlock:
gfs2_glock_dq(&gh);
-out:
+out_uninit:
gfs2_holder_uninit(&gh);
if (ret == 0) {
set_page_dirty(page);
wait_for_stable_page(page);
}
+out_write_access:
+ put_write_access(inode);
+out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
}
kfree(file->private_data);
file->private_data = NULL;
- if ((file->f_mode & FMODE_WRITE) &&
- (atomic_read(&inode->i_writecount) == 1))
- gfs2_rs_delete(ip);
+ if (!(file->f_mode & FMODE_WRITE))
+ return 0;
+ gfs2_rs_delete(ip);
return 0;
}
return inode;
fail_refresh:
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
ip->i_iopen_gh.gh_gl->gl_object = NULL;
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_iopen:
fs_err(sdp, "Error %d writing to log\n", error);
}
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, i) {
page = bvec->bv_page;
if (page_has_buffers(page))
gfs2_end_log_write_bh(sdp, bvec, error);
if (total > limit)
num = limit;
gfs2_log_unlock(sdp);
- page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
+ page = gfs2_get_log_desc(sdp,
+ is_databuf ? GFS2_LOG_DESC_JDATA :
+ GFS2_LOG_DESC_METADATA, num + 1, num);
ld = page_address(page);
gfs2_log_lock(sdp);
ptr = (__be64 *)(ld + 1);
{
struct kqid qid = qd->qd_id;
return (2 * (u64)from_kqid(&init_user_ns, qid)) +
- (qid.type == USRQUOTA) ? 0 : 1;
+ ((qid.type == USRQUOTA) ? 0 : 1);
}
static u64 qd2offset(struct gfs2_quota_data *qd)
goto unlock_out;
}
- gfs2_trans_add_meta(ip->i_gl, bh);
+ gfs2_trans_add_data(ip->i_gl, bh);
kaddr = kmap_atomic(page);
if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
*/
void gfs2_rs_delete(struct gfs2_inode *ip)
{
+ struct inode *inode = &ip->i_inode;
+
down_write(&ip->i_rw_mutex);
- if (ip->i_res) {
+ if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
gfs2_rs_deltree(ip->i_res);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
u32 extlen;
u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
int ret;
+ struct inode *inode = &ip->i_inode;
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
- extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+ if (S_ISDIR(inode->i_mode))
+ extlen = 1;
+ else {
+ extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+ extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+ }
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
return;
/* Must not read inode block until block type has been verified */
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
goto out;
}
if (gfs2_rs_active(ip->i_res))
gfs2_rs_deltree(ip->i_res);
- if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
+ if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq(&ip->i_iopen_gh);
+ }
gfs2_holder_uninit(&ip->i_iopen_gh);
gfs2_glock_dq_uninit(&gh);
if (error && error != GLR_TRYFAILED && error != -EROFS)
ip->i_gl = NULL;
if (ip->i_iopen_gh.gh_gl) {
ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, num);
spin_unlock(&tree->hash_lock);
- BUG_ON(node);
+ if (node) {
+ pr_crit("new node %u already hashed?\n", num);
+ WARN_ON(1);
+ return node;
+ }
node = __hfs_bnode_create(tree, num);
if (!node)
return ERR_PTR(-ENOMEM);
if (whence == SEEK_DATA || whence == SEEK_HOLE)
return -EINVAL;
+ mutex_lock(&i->i_mutex);
hpfs_lock(s);
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
- mutex_lock(&i->i_mutex);
pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
while (pos != new_off) {
if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
else goto fail;
if (pos == 12) goto fail;
}
- mutex_unlock(&i->i_mutex);
+ hpfs_add_pos(i, &filp->f_pos);
ok:
+ filp->f_pos = new_off;
hpfs_unlock(s);
- return filp->f_pos = new_off;
-fail:
mutex_unlock(&i->i_mutex);
+ return new_off;
+fail:
/*printk("illegal lseek: %016llx\n", new_off);*/
hpfs_unlock(s);
+ mutex_unlock(&i->i_mutex);
return -ESPIPE;
}
{
struct inode *inode = mapping->host;
+ hpfs_lock(inode->i_sb);
+
if (to > inode->i_size) {
truncate_pagecache(inode, to, inode->i_size);
hpfs_truncate(inode);
}
+
+ hpfs_unlock(inode->i_sb);
}
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- submit_bio(READ_SYNC, bio);
+	/* check if journaling to disk has been disabled */
+ if (log->no_integrity) {
+ bio->bi_size = 0;
+ lbmIODone(bio, 0);
+ } else {
+ submit_bio(READ_SYNC, bio);
+ }
wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
+ int rc = 0;
if (!(sb->s_flags & MS_RDONLY)) {
txQuiesce(sb);
- lmLogShutdown(log);
- updateSuper(sb, FM_CLEAN);
+ rc = lmLogShutdown(log);
+ if (rc) {
+ jfs_error(sb, "jfs_freeze: lmLogShutdown failed");
+
+ /* let operations fail rather than hang */
+ txResume(sb);
+
+ return rc;
+ }
+ rc = updateSuper(sb, FM_CLEAN);
+ if (rc) {
+ jfs_err("jfs_freeze: updateSuper failed\n");
+ /*
+ * Don't fail here. Everything succeeded except
+ * marking the superblock clean, so there's really
+ * no harm in leaving it frozen for now.
+ */
+ }
}
return 0;
}
int rc = 0;
if (!(sb->s_flags & MS_RDONLY)) {
- updateSuper(sb, FM_MOUNT);
- if ((rc = lmLogInit(log)))
- jfs_err("jfs_unlock failed with return code %d", rc);
- else
- txResume(sb);
+ rc = updateSuper(sb, FM_MOUNT);
+ if (rc) {
+ jfs_error(sb, "jfs_unfreeze: updateSuper failed");
+ goto out;
+ }
+ rc = lmLogInit(log);
+ if (rc)
+ jfs_error(sb, "jfs_unfreeze: lmLogInit failed");
+out:
+ txResume(sb);
}
- return 0;
+ return rc;
}
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
spin_lock(&tbl->slot_tbl_lock);
/* state manager is resetting the session */
- if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+ if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
spin_unlock(&tbl->slot_tbl_lock);
status = htonl(NFS4ERR_DELAY);
/* Return NFS4ERR_BADSESSION if we're draining the session
* A single slot, so highest used slotid is either 0 or -1
*/
tbl->highest_used_slotid = NFS4_NO_SLOT;
- nfs4_session_drain_complete(session, tbl);
+ nfs4_slot_tbl_drain_complete(tbl);
spin_unlock(&tbl->slot_tbl_lock);
}
__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
if (error == -EINVAL)
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
+ error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
if (error < 0)
goto error;
task->tk_timeout = 0;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+ if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
!args->sa_privileged) {
/* The state manager will wait until the slot table is empty */
dprintk("%s session is draining\n", __func__);
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
- int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
+ int open_mode = opendata->o_arg.open_flags;
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;
tbl->highest_used_slotid = new_max;
else {
tbl->highest_used_slotid = NFS4_NO_SLOT;
- nfs4_session_drain_complete(tbl->session, tbl);
+ nfs4_slot_tbl_drain_complete(tbl);
}
}
dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
struct nfs4_slot *slot = pslot;
struct nfs4_slot_table *tbl = slot->table;
- if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+ if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
return false;
slot->generation = tbl->generation;
args->sa_slot = slot;
};
/* Sessions */
+enum nfs4_slot_tbl_state {
+ NFS4_SLOT_TBL_DRAINING,
+};
+
#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
struct nfs4_slot_table {
struct nfs4_session *session; /* Parent session */
unsigned long generation; /* Generation counter for
target_highest_slotid */
struct completion complete;
+ unsigned long slot_tbl_state;
};
/*
enum nfs4_session_state {
NFS4_SESSION_INITING,
- NFS4_SESSION_DRAINING,
};
#if defined(CONFIG_NFS_V4_1)
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
-extern void nfs4_session_drain_complete(struct nfs4_session *session,
- struct nfs4_slot_table *tbl);
+extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
-static inline bool nfs4_session_draining(struct nfs4_session *session)
+static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
{
- return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+ return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
}
bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
if (ses == NULL)
return;
tbl = &ses->fc_slot_table;
- if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+ if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
spin_lock(&tbl->slot_tbl_lock);
nfs41_wake_slot_table(tbl);
spin_unlock(&tbl->slot_tbl_lock);
/*
* Signal state manager thread if session fore channel is drained
*/
-void nfs4_session_drain_complete(struct nfs4_session *session,
- struct nfs4_slot_table *tbl)
+void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
- if (nfs4_session_draining(session))
+ if (nfs4_slot_tbl_draining(tbl))
complete(&tbl->complete);
}
-static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
{
+ set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
spin_lock(&tbl->slot_tbl_lock);
if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
INIT_COMPLETION(tbl->complete);
struct nfs4_session *ses = clp->cl_session;
int ret = 0;
- set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
/* back channel */
- ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+ ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
if (ret)
return ret;
/* fore channel */
- return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+ return nfs4_drain_slot_tbl(&ses->fc_slot_table);
}
static void nfs41_finish_session_reset(struct nfs_client *clp)
args->namlen = data->namlen;
args->bsize = data->bsize;
+ args->auth_flavors[0] = RPC_AUTH_UNIX;
if (data->flags & NFS_MOUNT_SECFLAVOUR)
args->auth_flavors[0] = data->pseudoflavor;
if (!args->nfs_server.hostname)
goto out_no_address;
args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+ args->auth_flavors[0] = RPC_AUTH_UNIX;
if (data->auth_flavourlen) {
if (data->auth_flavourlen > 1)
goto out_inval_auth;
static int nilfs_set_page_dirty(struct page *page)
{
- int ret = __set_page_dirty_buffers(page);
+ int ret = __set_page_dirty_nobuffers(page);
- if (ret) {
+ if (page_has_buffers(page)) {
struct inode *inode = page->mapping->host;
- unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+ unsigned nr_dirty = 0;
+ struct buffer_head *bh, *head;
- nilfs_set_file_dirty(inode, nr_dirty);
+ /*
+ * This page is locked by callers, and no other thread
+ * concurrently marks its buffers dirty since they are
+	 * only dirtied through routines in fs/buffer.c, where
+	 * the call sites of mark_buffer_dirty are protected
+	 * by the page lock.
+ */
+ bh = head = page_buffers(page);
+ do {
+ /* Do not mark hole blocks dirty */
+ if (buffer_dirty(bh) || !buffer_mapped(bh))
+ continue;
+
+ set_buffer_dirty(bh);
+ nr_dirty++;
+ } while (bh = bh->b_this_page, bh != head);
+
+ if (nr_dirty)
+ nilfs_set_file_dirty(inode, nr_dirty);
}
return ret;
}
&hole_size, &rec, &is_last);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
if (rec.e_blkno == 0ULL) {
ret = ocfs2_inode_lock(inode, NULL, 1);
if (ret < 0) {
mlog_errno(ret);
- goto out_sems;
+ goto out;
}
ocfs2_inode_unlock(inode, 1);
if (peer_mnt == mnt)
peer_mnt = NULL;
}
- if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
+ if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
+ list_empty(&mnt->mnt_share))
mnt_release_group_id(mnt);
list_del_init(&mnt->mnt_share);
nstr[notify & ~SIGEV_THREAD_ID],
(notify & SIGEV_THREAD_ID) ? "tid" : "pid",
pid_nr_ns(timer->it_pid, tp->ns));
+ seq_printf(m, "ClockID: %d\n", timer->it_clock);
return 0;
}
struct inode *inode = file_inode(filp);
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
- loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1);
+ loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode);
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
next_pos = deh_offset(deh) + 1;
if (item_moved(&tmp_ih, &path_to_entry)) {
+ set_cpu_key_k_offset(&pos_key,
+ next_pos);
goto research;
}
} /* for */
TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
- if (insert_inode_locked4(inode, args.objectid,
- reiserfs_find_actor, &args) < 0) {
+
+ reiserfs_write_unlock(inode->i_sb);
+ err = insert_inode_locked4(inode, args.objectid,
+ reiserfs_find_actor, &args);
+ reiserfs_write_lock(inode->i_sb);
+ if (err) {
err = -EINVAL;
goto out_bad_inode;
}
+
if (old_format_only(sb))
/* not a perfect generation count, as object ids can be reused, but
** this is as good as reiserfs can do right now.
static int chown_one_xattr(struct dentry *dentry, void *data)
{
struct iattr *attrs = data;
- return reiserfs_setattr(dentry, attrs);
+ int ia_valid = attrs->ia_valid;
+ int err;
+
+ /*
+ * We only want the ownership bits. Otherwise, we'll do
+ * things like change a directory to a regular file if
+ * ATTR_MODE is set.
+ */
+ attrs->ia_valid &= (ATTR_UID|ATTR_GID);
+ err = reiserfs_setattr(dentry, attrs);
+ attrs->ia_valid = ia_valid;
+
+ return err;
}
/* No i_mutex, but the inode is unconnected. */
int depth;
int error;
+ if (IS_PRIVATE(inode))
+ return 0;
+
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vnodeops.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
#include "xfs_trace.h"
#include <linux/slab.h>
#include <linux/xattr.h>
*/
STATIC struct posix_acl *
-xfs_acl_from_disk(struct xfs_acl *aclp)
+xfs_acl_from_disk(
+ struct xfs_acl *aclp,
+ int max_entries)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
unsigned int count, i;
count = be32_to_cpu(aclp->acl_cnt);
- if (count > XFS_ACL_MAX_ENTRIES)
+ if (count > max_entries)
return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
struct xfs_inode *ip = XFS_I(inode);
struct posix_acl *acl;
struct xfs_acl *xfs_acl;
- int len = sizeof(struct xfs_acl);
unsigned char *ea_name;
int error;
+ int len;
acl = get_cached_acl(inode, type);
if (acl != ACL_NOT_CACHED)
* If we have a cached ACLs value just return it, not need to
* go out to the disk.
*/
-
- xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+ len = XFS_ACL_MAX_SIZE(ip->i_mount);
+ xfs_acl = kzalloc(len, GFP_KERNEL);
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
goto out;
}
- acl = xfs_acl_from_disk(xfs_acl);
+ acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
if (IS_ERR(acl))
goto out;
if (acl) {
struct xfs_acl *xfs_acl;
- int len;
+ int len = XFS_ACL_MAX_SIZE(ip->i_mount);
- xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+ xfs_acl = kzalloc(len, GFP_KERNEL);
if (!xfs_acl)
return -ENOMEM;
xfs_acl_to_disk(xfs_acl, acl);
- len = sizeof(struct xfs_acl) -
- (sizeof(struct xfs_acl_entry) *
- (XFS_ACL_MAX_ENTRIES - acl->a_count));
+
+ /* subtract away the unused acl entries */
+ len -= sizeof(struct xfs_acl_entry) *
+ (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
len, ATTR_ROOT);
static int
xfs_acl_exists(struct inode *inode, unsigned char *name)
{
- int len = sizeof(struct xfs_acl);
+ int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
ATTR_ROOT|ATTR_KERNOVAL) == 0);
goto out_release;
error = -EINVAL;
- if (acl->a_count > XFS_ACL_MAX_ENTRIES)
+ if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
goto out_release;
if (type == ACL_TYPE_ACCESS) {
struct posix_acl;
struct xfs_inode;
-#define XFS_ACL_MAX_ENTRIES 25
#define XFS_ACL_NOT_PRESENT (-1)
/* On-disk XFS access control list structure */
+struct xfs_acl_entry {
+ __be32 ae_tag;
+ __be32 ae_id;
+ __be16 ae_perm;
+ __be16 ae_pad; /* fill the implicit hole in the structure */
+};
+
struct xfs_acl {
- __be32 acl_cnt;
- struct xfs_acl_entry {
- __be32 ae_tag;
- __be32 ae_id;
- __be16 ae_perm;
- } acl_entry[XFS_ACL_MAX_ENTRIES];
+ __be32 acl_cnt;
+ struct xfs_acl_entry acl_entry[0];
};
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp) \
+ (xfs_sb_version_hascrc(&mp->m_sb) \
+ ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+ sizeof(struct xfs_acl_entry) \
+ : 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+ (sizeof(struct xfs_acl) + \
+ sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
+
/* On-disk XFS extended attribute names */
#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
i_size_read(inode));
+ /*
+ * If the current map does not span the entire page we are about to try
+ * to write, then give up. The only way we can write a page that spans
+ * multiple mappings in a single writeback iteration is via the
+ * xfs_vm_writepage() function. Data integrity writeback requires the
+ * entire page to be written in a single attempt, otherwise the part of
+ * the page we don't write here doesn't get written as part of the data
+ * integrity sync.
+ *
+ * For normal writeback, we also don't attempt to write partial pages
+ * here as it simply means that write_cache_pages() will see it under
+ * writeback and ignore the page until some point in the future, at
+ * which time this will be the only page in the file that needs
+ * writeback. Hence for more optimal IO patterns, we should always
+ * avoid partial page writeback due to multiple mappings on a page here.
+ */
+ if (!xfs_imap_valid(inode, imap, end_offset))
+ goto fail_unlock_page;
+
len = 1 << inode->i_blkbits;
p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
PAGE_CACHE_SIZE);
*/
int
xfs_attr_shortform_allfit(
- struct xfs_buf *bp,
- struct xfs_inode *dp)
+ struct xfs_buf *bp,
+ struct xfs_inode *dp)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr_leaf_entry *entry;
xfs_attr_leaf_name_local_t *name_loc;
- int bytes, i;
+ struct xfs_attr3_icleaf_hdr leafhdr;
+ int bytes;
+ int i;
leaf = bp->b_addr;
- ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+ xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ entry = xfs_attr3_leaf_entryp(leaf);
- entry = &leaf->entries[0];
bytes = sizeof(struct xfs_attr_sf_hdr);
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+ for (i = 0; i < leafhdr.count; entry++, i++) {
if (entry->flags & XFS_ATTR_INCOMPLETE)
continue; /* don't copy partial entries */
if (!(entry->flags & XFS_ATTR_LOCAL))
return(0);
if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
return(0);
- bytes += sizeof(struct xfs_attr_sf_entry)-1
+ bytes += sizeof(struct xfs_attr_sf_entry) - 1
+ name_loc->namelen
+ be16_to_cpu(name_loc->valuelen);
}
if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
(bytes == sizeof(struct xfs_attr_sf_hdr)))
- return(-1);
- return(xfs_attr_shortform_bytesfit(dp, bytes));
+ return -1;
+ return xfs_attr_shortform_bytesfit(dp, bytes);
}
/*
name_rmt->valuelen = 0;
name_rmt->valueblk = 0;
args->rmtblkno = 1;
- args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
}
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
STATIC void
xfs_attr3_leaf_compact(
struct xfs_da_args *args,
- struct xfs_attr3_icleaf_hdr *ichdr_d,
+ struct xfs_attr3_icleaf_hdr *ichdr_dst,
struct xfs_buf *bp)
{
- xfs_attr_leafblock_t *leaf_s, *leaf_d;
- struct xfs_attr3_icleaf_hdr ichdr_s;
+ struct xfs_attr_leafblock *leaf_src;
+ struct xfs_attr_leafblock *leaf_dst;
+ struct xfs_attr3_icleaf_hdr ichdr_src;
struct xfs_trans *trans = args->trans;
struct xfs_mount *mp = trans->t_mountp;
char *tmpbuffer;
trace_xfs_attr_leaf_compact(args);
tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
memset(bp->b_addr, 0, XFS_LBSIZE(mp));
+ leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
+ leaf_dst = bp->b_addr;
/*
- * Copy basic information
+ * Copy the on-disk header back into the destination buffer to ensure
+ * all the information in the header that is not part of the incore
+ * header structure is preserved.
*/
- leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
- leaf_d = bp->b_addr;
- ichdr_s = *ichdr_d; /* struct copy */
- ichdr_d->firstused = XFS_LBSIZE(mp);
- ichdr_d->usedbytes = 0;
- ichdr_d->count = 0;
- ichdr_d->holes = 0;
- ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s);
- ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
+ memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
+
+ /* Initialise the incore headers */
+ ichdr_src = *ichdr_dst; /* struct copy */
+ ichdr_dst->firstused = XFS_LBSIZE(mp);
+ ichdr_dst->usedbytes = 0;
+ ichdr_dst->count = 0;
+ ichdr_dst->holes = 0;
+ ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
+ ichdr_dst->freemap[0].size = ichdr_dst->firstused -
+ ichdr_dst->freemap[0].base;
+
+
+ /* write the header back to initialise the underlying buffer */
+ xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
/*
 * Copy all entries in the same (sorted) order,
* but allocate name/value pairs packed and in sequence.
*/
- xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0,
- ichdr_s.count, mp);
+ xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
+ ichdr_src.count, mp);
/*
* this logs the entire buffer, but the caller must write the header
* back to the buffer when it is finished modifying it.
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
- tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP);
- memset(tmp_leaf, 0, state->blocksize);
- memset(&tmphdr, 0, sizeof(tmphdr));
+ tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);
+
+ /*
+ * Copy the header into the temp leaf so that all the stuff
+ * not in the incore header is present and gets copied back in
+ * once we've moved all the entries.
+ */
+ memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
+ memset(&tmphdr, 0, sizeof(tmphdr));
tmphdr.magic = savehdr.magic;
tmphdr.forw = savehdr.forw;
tmphdr.back = savehdr.back;
tmphdr.firstused = state->blocksize;
+
+ /* write the header to the temp buffer to initialise it */
+ xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
+
if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
drop_blk->bp, &drophdr)) {
xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
+ args->valuelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
- be32_to_cpu(name_rmt->valuelen));
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(
+ args->dp->i_mount,
+ args->valuelen);
return XFS_ERROR(EEXIST);
}
}
ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
valuelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
+ valuelen);
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = valuelen;
return 0;
args.valuelen = valuelen;
args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
+ args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ args.dp->i_mount, valuelen);
retval = xfs_attr_rmtval_get(&args);
if (retval)
return retval;
name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
if (name_rmt->valueblk) {
lp->valueblk = be32_to_cpu(name_rmt->valueblk);
- lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
+ lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
be32_to_cpu(name_rmt->valuelen));
lp++;
}
* Each contiguous block has a header, so it is not just a simple attribute
* length to FSB conversion.
*/
-static int
+int
xfs_attr3_rmt_blocks(
struct xfs_mount *mp,
int attrlen)
{
- int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp,
- mp->m_sb.sb_blocksize);
- return (attrlen + buflen - 1) / buflen;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+ return (attrlen + buflen - 1) / buflen;
+ }
+ return XFS_B_TO_FSB(mp, attrlen);
+}
+
+/*
+ * Checking of the remote attribute header is split into two parts. The verifier
+ * does CRC, location and bounds checking; the unpacking function checks the
+ * attribute parameters and owner.
+ */
+static bool
+xfs_attr3_rmt_hdr_ok(
+ struct xfs_mount *mp,
+ void *ptr,
+ xfs_ino_t ino,
+ uint32_t offset,
+ uint32_t size,
+ xfs_daddr_t bno)
+{
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+ if (bno != be64_to_cpu(rmt->rm_blkno))
+ return false;
+ if (offset != be32_to_cpu(rmt->rm_offset))
+ return false;
+ if (size != be32_to_cpu(rmt->rm_bytes))
+ return false;
+ if (ino != be64_to_cpu(rmt->rm_owner))
+ return false;
+
+ /* ok */
+ return true;
}
static bool
xfs_attr3_rmt_verify(
- struct xfs_buf *bp)
+ struct xfs_mount *mp,
+ void *ptr,
+ int fsbsize,
+ xfs_daddr_t bno)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return false;
return false;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
return false;
- if (bp->b_bn != be64_to_cpu(rmt->rm_blkno))
+ if (be64_to_cpu(rmt->rm_blkno) != bno)
+ return false;
+ if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
return false;
if (be32_to_cpu(rmt->rm_offset) +
be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ char *ptr;
+ int len;
+ bool corrupt = false;
+ xfs_daddr_t bno;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
- XFS_ATTR3_RMT_CRC_OFF) ||
- !xfs_attr3_rmt_verify(bp)) {
+ ptr = bp->b_addr;
+ bno = bp->b_bn;
+ len = BBTOB(bp->b_length);
+ ASSERT(len >= XFS_LBSIZE(mp));
+
+ while (len > 0) {
+ if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
+ XFS_ATTR3_RMT_CRC_OFF)) {
+ corrupt = true;
+ break;
+ }
+ if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+ corrupt = true;
+ break;
+ }
+ len -= XFS_LBSIZE(mp);
+ ptr += XFS_LBSIZE(mp);
+ bno += mp->m_bsize;
+ }
+
+ if (corrupt) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
- }
+ } else
+ ASSERT(len == 0);
}
static void
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
+ char *ptr;
+ int len;
+ xfs_daddr_t bno;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_attr3_rmt_verify(bp)) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
- xfs_buf_ioerror(bp, EFSCORRUPTED);
- return;
- }
+ ptr = bp->b_addr;
+ bno = bp->b_bn;
+ len = BBTOB(bp->b_length);
+ ASSERT(len >= XFS_LBSIZE(mp));
+
+ while (len > 0) {
+ if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+ XFS_CORRUPTION_ERROR(__func__,
+ XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ return;
+ }
+ if (bip) {
+ struct xfs_attr3_rmt_hdr *rmt;
+
+ rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+ rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ }
+ xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
- if (bip) {
- struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
- rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ len -= XFS_LBSIZE(mp);
+ ptr += XFS_LBSIZE(mp);
+ bno += mp->m_bsize;
}
- xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
- XFS_ATTR3_RMT_CRC_OFF);
+ ASSERT(len == 0);
}
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
.verify_write = xfs_attr3_rmt_write_verify,
};
-static int
+STATIC int
xfs_attr3_rmt_hdr_set(
struct xfs_mount *mp,
+ void *ptr,
xfs_ino_t ino,
uint32_t offset,
uint32_t size,
- struct xfs_buf *bp)
+ xfs_daddr_t bno)
{
- struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return 0;
rmt->rm_bytes = cpu_to_be32(size);
uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
rmt->rm_owner = cpu_to_be64(ino);
- rmt->rm_blkno = cpu_to_be64(bp->b_bn);
- bp->b_ops = &xfs_attr3_rmt_buf_ops;
+ rmt->rm_blkno = cpu_to_be64(bno);
return sizeof(struct xfs_attr3_rmt_hdr);
}
/*
- * Checking of the remote attribute header is split into two parts. the verifier
- * does CRC, location and bounds checking, the unpacking function checks the
- * attribute parameters and owner.
+ * Helper functions to copy attribute data in and out of the on-disk extents
*/
-static bool
-xfs_attr3_rmt_hdr_ok(
- struct xfs_mount *mp,
- xfs_ino_t ino,
- uint32_t offset,
- uint32_t size,
- struct xfs_buf *bp)
+STATIC int
+xfs_attr_rmtval_copyout(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ xfs_ino_t ino,
+ int *offset,
+ int *valuelen,
+ char **dst)
{
- struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+ char *src = bp->b_addr;
+ xfs_daddr_t bno = bp->b_bn;
+ int len = BBTOB(bp->b_length);
- if (offset != be32_to_cpu(rmt->rm_offset))
- return false;
- if (size != be32_to_cpu(rmt->rm_bytes))
- return false;
- if (ino != be64_to_cpu(rmt->rm_owner))
- return false;
+ ASSERT(len >= XFS_LBSIZE(mp));
- /* ok */
- return true;
+ while (len > 0 && *valuelen > 0) {
+ int hdr_size = 0;
+ int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+ byte_cnt = min_t(int, *valuelen, byte_cnt);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
+ byte_cnt, bno)) {
+ xfs_alert(mp,
+"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
+ bno, *offset, byte_cnt, ino);
+ return EFSCORRUPTED;
+ }
+ hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
+ }
+
+ memcpy(*dst, src + hdr_size, byte_cnt);
+
+ /* roll buffer forwards */
+ len -= XFS_LBSIZE(mp);
+ src += XFS_LBSIZE(mp);
+ bno += mp->m_bsize;
+
+ /* roll attribute data forwards */
+ *valuelen -= byte_cnt;
+ *dst += byte_cnt;
+ *offset += byte_cnt;
+ }
+ return 0;
+}
+
+STATIC void
+xfs_attr_rmtval_copyin(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ xfs_ino_t ino,
+ int *offset,
+ int *valuelen,
+ char **src)
+{
+ char *dst = bp->b_addr;
+ xfs_daddr_t bno = bp->b_bn;
+ int len = BBTOB(bp->b_length);
+
+ ASSERT(len >= XFS_LBSIZE(mp));
+
+ while (len > 0 && *valuelen > 0) {
+ int hdr_size;
+ int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+ byte_cnt = min(*valuelen, byte_cnt);
+ hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
+ byte_cnt, bno);
+
+ memcpy(dst + hdr_size, *src, byte_cnt);
+
+ /*
+ * If this is the last block, zero the remainder of it.
+ * Check that we are actually the last block, too.
+ */
+ if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
+ ASSERT(*valuelen - byte_cnt == 0);
+ ASSERT(len == XFS_LBSIZE(mp));
+ memset(dst + hdr_size + byte_cnt, 0,
+ XFS_LBSIZE(mp) - hdr_size - byte_cnt);
+ }
+
+ /* roll buffer forwards */
+ len -= XFS_LBSIZE(mp);
+ dst += XFS_LBSIZE(mp);
+ bno += mp->m_bsize;
+
+ /* roll attribute data forwards */
+ *valuelen -= byte_cnt;
+ *src += byte_cnt;
+ *offset += byte_cnt;
+ }
}
/*
struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
struct xfs_mount *mp = args->dp->i_mount;
struct xfs_buf *bp;
- xfs_daddr_t dblkno;
xfs_dablk_t lblkno = args->rmtblkno;
- void *dst = args->value;
+ char *dst = args->value;
int valuelen = args->valuelen;
int nmap;
int error;
- int blkcnt;
+ int blkcnt = args->rmtblkcnt;
int i;
int offset = 0;
while (valuelen > 0) {
nmap = ATTR_RMTVALUE_MAPSIZE;
error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt, map, &nmap,
+ blkcnt, map, &nmap,
XFS_BMAPI_ATTRFORK);
if (error)
return error;
ASSERT(nmap >= 1);
for (i = 0; (i < nmap) && (valuelen > 0); i++) {
- int byte_cnt;
- char *src;
+ xfs_daddr_t dblkno;
+ int dblkcnt;
ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
(map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
- blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+ dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
- dblkno, blkcnt, 0, &bp,
+ dblkno, dblkcnt, 0, &bp,
&xfs_attr3_rmt_buf_ops);
if (error)
return error;
- byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length));
- byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
-
- src = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino,
- offset, byte_cnt, bp)) {
- xfs_alert(mp,
-"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
- offset, byte_cnt, args->dp->i_ino);
- xfs_buf_relse(bp);
- return EFSCORRUPTED;
-
- }
-
- src += sizeof(struct xfs_attr3_rmt_hdr);
- }
-
- memcpy(dst, src, byte_cnt);
+ error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
+ &offset, &valuelen,
+ &dst);
xfs_buf_relse(bp);
+ if (error)
+ return error;
- offset += byte_cnt;
- dst += byte_cnt;
- valuelen -= byte_cnt;
-
+ /* roll attribute extent map forwards */
lblkno += map[i].br_blockcount;
+ blkcnt -= map[i].br_blockcount;
}
}
ASSERT(valuelen == 0);
struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
struct xfs_bmbt_irec map;
- struct xfs_buf *bp;
- xfs_daddr_t dblkno;
xfs_dablk_t lblkno;
xfs_fileoff_t lfileoff = 0;
- void *src = args->value;
+ char *src = args->value;
int blkcnt;
int valuelen;
int nmap;
int error;
- int hdrcnt = 0;
- bool crcs = xfs_sb_version_hascrc(&mp->m_sb);
int offset = 0;
trace_xfs_attr_rmtval_set(args);
* Find a "hole" in the attribute address space large enough for
* us to drop the new attribute's value into. Because CRC enable
* attributes have headers, we can't just do a straight byte to FSB
- * conversion. We calculate the worst case block count in this case
- * and we may not need that many, so we have to handle this when
- * allocating the blocks below.
+ * conversion and have to take the header space into account.
*/
- if (!crcs)
- blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
- else
- blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
-
+ blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
XFS_ATTR_FORK);
if (error)
return error;
- /* Start with the attribute data. We'll allocate the rest afterwards. */
- if (crcs)
- blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
-
args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
args->rmtblkcnt = blkcnt;
(map.br_startblock != HOLESTARTBLOCK));
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
- hdrcnt++;
-
- /*
- * If we have enough blocks for the attribute data, calculate
- * how many extra blocks we need for headers. We might run
- * through this multiple times in the case that the additional
- * headers in the blocks needed for the data fragments spills
- * into requiring more blocks. e.g. for 512 byte blocks, we'll
- * spill for another block every 9 headers we require in this
- * loop.
- */
- if (crcs && blkcnt == 0) {
- int total_len;
-
- total_len = args->valuelen +
- hdrcnt * sizeof(struct xfs_attr3_rmt_hdr);
- blkcnt = XFS_B_TO_FSB(mp, total_len);
- blkcnt -= args->rmtblkcnt;
- args->rmtblkcnt += blkcnt;
- }
/*
* Start the next trans in the chain.
* the INCOMPLETE flag.
*/
lblkno = args->rmtblkno;
+ blkcnt = args->rmtblkcnt;
valuelen = args->valuelen;
while (valuelen > 0) {
- int byte_cnt;
- char *buf;
+ struct xfs_buf *bp;
+ xfs_daddr_t dblkno;
+ int dblkcnt;
+
+ ASSERT(blkcnt > 0);
- /*
- * Try to remember where we decided to put the value.
- */
xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt, &map, &nmap,
+ blkcnt, &map, &nmap,
XFS_BMAPI_ATTRFORK);
if (error)
return(error);
(map.br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+ dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
- bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
+ bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
if (!bp)
return ENOMEM;
bp->b_ops = &xfs_attr3_rmt_buf_ops;
- byte_cnt = BBTOB(bp->b_length);
- byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
- if (valuelen < byte_cnt)
- byte_cnt = valuelen;
-
- buf = bp->b_addr;
- buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset,
- byte_cnt, bp);
- memcpy(buf, src, byte_cnt);
-
- if (byte_cnt < BBTOB(bp->b_length))
- xfs_buf_zero(bp, byte_cnt,
- BBTOB(bp->b_length) - byte_cnt);
+ xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
+ &valuelen, &src);
error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
xfs_buf_relse(bp);
if (error)
return error;
- src += byte_cnt;
- valuelen -= byte_cnt;
- offset += byte_cnt;
- hdrcnt--;
+ /* roll attribute extent map forwards */
lblkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
}
ASSERT(valuelen == 0);
- ASSERT(hdrcnt == 0);
return 0;
}
* out-of-line buffer that it is stored on.
*/
int
-xfs_attr_rmtval_remove(xfs_da_args_t *args)
+xfs_attr_rmtval_remove(
+ struct xfs_da_args *args)
{
- xfs_mount_t *mp;
- xfs_bmbt_irec_t map;
- xfs_buf_t *bp;
- xfs_daddr_t dblkno;
- xfs_dablk_t lblkno;
- int valuelen, blkcnt, nmap, error, done, committed;
+ struct xfs_mount *mp = args->dp->i_mount;
+ xfs_dablk_t lblkno;
+ int blkcnt;
+ int error;
+ int done;
trace_xfs_attr_rmtval_remove(args);
- mp = args->dp->i_mount;
-
/*
- * Roll through the "value", invalidating the attribute value's
- * blocks.
+ * Roll through the "value", invalidating the attribute value's blocks.
+ * Note that args->rmtblkcnt is the minimum number of data blocks we'll
+ * see for a CRC enabled remote attribute. Each extent will have a
+ * header, and so we may have more blocks than we realise here. If we
+ * fail to map the blocks correctly, we'll have problems with the buffer
+ * lookups.
*/
lblkno = args->rmtblkno;
- valuelen = args->rmtblkcnt;
- while (valuelen > 0) {
+ blkcnt = args->rmtblkcnt;
+ while (blkcnt > 0) {
+ struct xfs_bmbt_irec map;
+ struct xfs_buf *bp;
+ xfs_daddr_t dblkno;
+ int dblkcnt;
+ int nmap;
+
/*
* Try to remember where we decided to put the value.
*/
nmap = 1;
error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt, &map, &nmap,
- XFS_BMAPI_ATTRFORK);
+ blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
if (error)
return(error);
ASSERT(nmap == 1);
(map.br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+ dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
/*
* If the "remote" value is in the cache, remove it.
*/
- bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
+ bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
if (bp) {
xfs_buf_stale(bp);
xfs_buf_relse(bp);
bp = NULL;
}
- valuelen -= map.br_blockcount;
-
lblkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
}
/*
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
+ int committed;
+
xfs_bmap_init(args->flist, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */
+/*
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
struct xfs_attr3_rmt_hdr {
__be32 rm_magic;
__be32 rm_offset;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
+int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
+
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_rmtval_set(struct xfs_da_args *args);
int xfs_attr_rmtval_remove(struct xfs_da_args *args);
xfs_alert(btp->bt_mount,
"%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
__func__, blkno, eofs);
+ WARN_ON(1);
return NULL;
}
{
xfs_buftarg_t *btp;
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+ btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-/*
- * You would think we need to bump the nvecs here too, but we do not
- * this number is used by recovery, and it gets confused by the boundary
- * split here
- * nvecs++;
- */
+ nvecs++;
vecp++;
first_bit = next_bit;
last_bit = next_bit;
break;
return;
case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
bp->b_ops = &xfs_attr3_leaf_buf_ops;
bp->b_ops->verify_read(bp);
return;
ASSERT(nirecs >= 1);
if (nirecs > 1) {
- map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+ map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
+ KM_SLEEP | KM_NOFS);
if (!map)
return ENOMEM;
*mapp = map;
* Optimize the one-block case.
*/
if (nfsb != 1)
- irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
+ irecs = kmem_zalloc(sizeof(irec) * nfsb,
+ KM_SLEEP | KM_NOFS);
nirecs = nfsb;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
int taforkblks = 0;
__uint64_t tmp;
+ /*
+ * We have no way of updating owner information in the BMBT blocks for
+ * each inode on CRC enabled filesystems, so to avoid corrupting this
+ * metadata we simply don't allow extent swaps to occur.
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return XFS_ERROR(EINVAL);
+
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
if (!tempifp) {
error = XFS_ERROR(ENOMEM);
__be32 firstdb; /* db of first entry */
__be32 nvalid; /* count of valid entries */
__be32 nused; /* count of used entries */
+ __be32 pad; /* 64 bit alignment. */
};
struct xfs_dir3_free {
mp->m_sb.sb_blocksize);
map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
(length * sizeof(struct xfs_bmbt_irec)),
- KM_SLEEP);
+ KM_SLEEP | KM_NOFS);
map_info->map_size = length;
/*
* Initialize the new block to be empty, and remember
* its first slot as our empty slot.
*/
- hdr.magic = XFS_DIR2_FREE_MAGIC;
- hdr.firstdb = 0;
- hdr.nused = 0;
- hdr.nvalid = 0;
+ memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
+ memset(&hdr, 0, sizeof(hdr));
+
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
hdr.magic = XFS_DIR3_FREE_MAGIC;
+
hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
- }
+ } else
+ hdr.magic = XFS_DIR2_FREE_MAGIC;
xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
*bpp = bp;
return 0;
*/
freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
xfs_dir3_free_max_bests(mp);
- free->hdr.nvalid = 0;
- free->hdr.nused = 0;
} else {
free = fbp->b_addr;
bests = xfs_dir3_free_bests_p(mp, free);
d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
d->dd_diskdq.d_id = cpu_to_be32(curid);
d->dd_diskdq.d_flags = type;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+ xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
}
xfs_trans_dquot_buf(tp, bp,
dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
-STATIC void
-xfs_dquot_buf_calc_crc(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
- int i;
-
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return;
-
- for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
- xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
- offsetof(struct xfs_dqblk, dd_crc));
- }
-}
-
STATIC bool
xfs_dquot_buf_verify_crc(
struct xfs_mount *mp,
for (i = 0; i < ndquots; i++, d++) {
if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
- offsetof(struct xfs_dqblk, dd_crc)))
+ XFS_DQUOT_CRC_OFF))
return false;
if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
return false;
}
-
return true;
}
}
}
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
void
xfs_dquot_buf_write_verify(
struct xfs_buf *bp)
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
- xfs_dquot_buf_calc_crc(mp, bp);
}
const struct xfs_buf_ops xfs_dquot_buf_ops = {
* copy the lsn into the on-disk dquot now while we have the in memory
* dquot here. This can't be done later in the write verifier as we
* can't get access to the log item at that point in time.
+ *
+ * We also calculate the CRC here so that the on-disk dquot in the
+ * buffer always has a valid CRC. This ensures there is no possibility
+ * of a dquot without an up-to-date CRC getting to disk.
*/
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
+ xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
}
/*
{
ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
- __xfs_efi_release(efip);
-
/* recovery needs us to drop the EFI reference, too */
if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
__xfs_efi_release(efip);
+
+ __xfs_efi_release(efip);
+ /* efip may now have been freed, do not reference it again. */
}
}
#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */
#define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */
#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */
+#define XFS_FSOP_GEOM_FLAGS_V5SB 0x8000 /* version 5 superblock */
/*
(xfs_sb_version_hasattr2(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
+ XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
+ (xfs_sb_version_hascrc(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_V5SB : 0);
geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
mp->m_sb.sb_logsectsize : BBSIZE;
geo->rtsectsize = mp->m_sb.sb_blocksize;
dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
offset = ip->i_imap.im_boffset +
offsetof(xfs_dinode_t, di_next_unlinked);
+
+ /* need to recalc the inode CRC if appropriate */
+ xfs_dinode_calc_crc(mp, dip);
+
xfs_trans_inode_buf(tp, ibp);
xfs_trans_log_buf(tp, ibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
offset = ip->i_imap.im_boffset +
offsetof(xfs_dinode_t, di_next_unlinked);
+
+ /* need to recalc the inode CRC if appropriate */
+ xfs_dinode_calc_crc(mp, dip);
+
xfs_trans_inode_buf(tp, ibp);
xfs_trans_log_buf(tp, ibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
offset = ip->i_imap.im_boffset +
offsetof(xfs_dinode_t, di_next_unlinked);
+
+ /* need to recalc the inode CRC if appropriate */
+ xfs_dinode_calc_crc(mp, dip);
+
xfs_trans_inode_buf(tp, ibp);
xfs_trans_log_buf(tp, ibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
last_dip->di_next_unlinked = cpu_to_be32(next_agino);
ASSERT(next_agino != 0);
offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+
+ /* need to recalc the inode CRC if appropriate */
+ xfs_dinode_calc_crc(mp, last_dip);
+
xfs_trans_inode_buf(tp, last_ibp);
xfs_trans_log_buf(tp, last_ibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
return 0;
}
+static void
+xfs_setattr_mode(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct iattr *iattr)
+{
+ struct inode *inode = VFS_I(ip);
+ umode_t mode = iattr->ia_mode;
+
+ ASSERT(tp);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+
+ ip->i_d.di_mode &= S_IFMT;
+ ip->i_d.di_mode |= mode & ~S_IFMT;
+
+ inode->i_mode &= S_IFMT;
+ inode->i_mode |= mode & ~S_IFMT;
+}
+
int
xfs_setattr_nonsize(
struct xfs_inode *ip,
/*
* Change file access modes.
*/
- if (mask & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
-
- ip->i_d.di_mode &= S_IFMT;
- ip->i_d.di_mode |= mode & ~S_IFMT;
-
- inode->i_mode &= S_IFMT;
- inode->i_mode |= mode & ~S_IFMT;
- }
+ if (mask & ATTR_MODE)
+ xfs_setattr_mode(tp, ip, iattr);
/*
* Change file access or modified times.
return XFS_ERROR(error);
ASSERT(S_ISREG(ip->i_d.di_mode));
- ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
- ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
if (!(flags & XFS_ATTR_NOLOCK)) {
lock_flags |= XFS_IOLOCK_EXCL;
xfs_inode_clear_eofblocks_tag(ip);
}
+ /*
+ * Change file access modes.
+ */
+ if (mask & ATTR_MODE)
+ xfs_setattr_mode(tp, ip, iattr);
+
if (mask & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
new_lv = kmem_zalloc(sizeof(*new_lv) +
niovecs * sizeof(struct xfs_log_iovec),
- KM_SLEEP);
+ KM_SLEEP|KM_NOFS);
/* The allocated iovec region lies beyond the log vector. */
new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
}
/*
- * Sort the log items in the transaction. Cancelled buffers need
- * to be put first so they are processed before any items that might
- * modify the buffers. If they are cancelled, then the modifications
- * don't need to be replayed.
+ * Sort the log items in the transaction.
+ *
+ * The ordering constraints are defined by the inode allocation and unlink
+ * behaviour. The rules are:
+ *
+ * 1. Every item is only logged once in a given transaction. Hence it
+ * represents the last logged state of the item. Hence ordering is
+ * dependent on the order in which operations need to be performed so
+ * required initial conditions are always met.
+ *
+ * 2. Cancelled buffers are recorded in pass 1 in a separate table and
+ * there's nothing to replay from them so we can simply cull them
+ * from the transaction. However, we can't do that until after we've
+ * replayed all the other items because they may be dependent on the
+ * cancelled buffer and replaying the cancelled buffer can remove it
+ * from the cancelled buffer table. Hence they have to be done last.
+ *
+ * 3. Inode allocation buffers must be replayed before inode items that
+ * read the buffer and replay changes into it.
+ *
+ * 4. Inode unlink buffers must be replayed after inode items are replayed.
+ * This ensures that inodes are completely flushed to the inode buffer
+ * in a "free" state before we remove the unlinked inode list pointer.
+ *
+ * Hence the ordering needs to be inode allocation buffers first, inode items
+ * second, inode unlink buffers third and cancelled buffers last.
+ *
+ * But there's a problem with that - we can't tell an inode allocation buffer
+ * apart from a regular buffer, so we can't separate them. We can, however,
+ * tell an inode unlink buffer from the others, and so we can separate them out
+ * from all the other buffers and move them to last.
+ *
+ * Hence, 4 lists, in order from head to tail:
+ * - buffer_list for all buffers except cancelled/inode unlink buffers
+ * - item_list for all non-buffer items
+ * - inode_buffer_list for inode unlink buffers
+ * - cancel_list for the cancelled buffers
*/
STATIC int
xlog_recover_reorder_trans(
{
xlog_recover_item_t *item, *n;
LIST_HEAD(sort_list);
+ LIST_HEAD(cancel_list);
+ LIST_HEAD(buffer_list);
+ LIST_HEAD(inode_buffer_list);
+ LIST_HEAD(inode_list);
list_splice_init(&trans->r_itemq, &sort_list);
list_for_each_entry_safe(item, n, &sort_list, ri_list) {
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
- if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
+ if (buf_f->blf_flags & XFS_BLF_CANCEL) {
trace_xfs_log_recover_item_reorder_head(log,
trans, item, pass);
- list_move(&item->ri_list, &trans->r_itemq);
+ list_move(&item->ri_list, &cancel_list);
break;
}
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+ list_move(&item->ri_list, &inode_buffer_list);
+ break;
+ }
+ list_move_tail(&item->ri_list, &buffer_list);
+ break;
case XFS_LI_INODE:
case XFS_LI_DQUOT:
case XFS_LI_QUOTAOFF:
case XFS_LI_EFI:
trace_xfs_log_recover_item_reorder_tail(log,
trans, item, pass);
- list_move_tail(&item->ri_list, &trans->r_itemq);
+ list_move_tail(&item->ri_list, &inode_list);
break;
default:
xfs_warn(log->l_mp,
}
}
ASSERT(list_empty(&sort_list));
+ if (!list_empty(&buffer_list))
+ list_splice(&buffer_list, &trans->r_itemq);
+ if (!list_empty(&inode_list))
+ list_splice_tail(&inode_list, &trans->r_itemq);
+ if (!list_empty(&inode_buffer_list))
+ list_splice_tail(&inode_buffer_list, &trans->r_itemq);
+ if (!list_empty(&cancel_list))
+ list_splice_tail(&cancel_list, &trans->r_itemq);
return 0;
}
buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
next_unlinked_offset);
*buffer_nextp = *logged_nextp;
+
+ /*
+ * If necessary, recalculate the CRC in the on-disk inode. We
+ * have to leave the inode in a consistent state for whoever
+ * reads it next....
+ */
+ xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
+ xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+
}
return 0;
ASSERT(BBTOB(bp->b_io_length) >=
((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
+ /*
+ * The dirty regions logged in the buffer, even though
+ * contiguous, may span multiple chunks. This is because the
+ * dirty region may span a physical page boundary in a buffer
+ * and hence be split into two separate vectors for writing into
+ * the log. Hence we need to trim nbits back to the length of
+ * the current region being copied out of the log.
+ */
+ if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
+ nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
+
/*
* Do a sanity check if this is a dquot buffer. Just checking
* the first dquot in the buffer should do. XXXThis is
d->dd_diskdq.d_flags = type;
d->dd_diskdq.d_id = cpu_to_be32(id);
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+ xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
+
return errs;
}
}
memcpy(ddq, recddq, item->ri_buf[1].i_len);
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
ASSERT(dq_f->qlf_size == 2);
ASSERT(bp->b_target->bt_mount == mp);
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_cksum.h"
/*
* The global quota manager. There is only one of these for the entire
xfs_dqid_t id,
uint type)
{
- xfs_disk_dquot_t *ddq;
+ struct xfs_dqblk *dqb;
int j;
trace_xfs_reset_dqcounts(bp, _RET_IP_);
do_div(j, sizeof(xfs_dqblk_t));
ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
- ddq = bp->b_addr;
+ dqb = bp->b_addr;
for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
+ struct xfs_disk_dquot *ddq;
+
+ ddq = (struct xfs_disk_dquot *)&dqb[j];
+
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
ddq->d_bwarns = 0;
ddq->d_iwarns = 0;
ddq->d_rtbwarns = 0;
- ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ xfs_update_cksum((char *)&dqb[j],
+ sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
}
}
XFS_FSB_TO_DADDR(mp, bno),
mp->m_quotainfo->qi_dqchunklen, 0, &bp,
&xfs_dquot_buf_ops);
- if (error)
- break;
/*
- * XXX(hch): need to figure out if it makes sense to validate
- * the CRC here.
+ * CRC and validation errors will return EFSCORRUPTED here. If
+ * this occurs, re-read without CRC validation so that we can
+ * repair the damage via xfs_qm_reset_dqcounts(). This process
+ * will leave a trace in the log indicating corruption has
+ * been detected.
*/
+ if (error == EFSCORRUPTED) {
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, bno),
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+ NULL);
+ }
+
+ if (error)
+ break;
+
xfs_qm_reset_dqcounts(mp, bp, firstid, type);
xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
- /*
- * goto the next block.
- */
+
+ /* go to the next block. */
bno++;
firstid += mp->m_quotainfo->qi_dqperchunk;
}
if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
return 0;
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
- error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
- 0, 0, XFS_DEFAULT_LOG_COUNT);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return (error);
- }
-
/*
* We don't want to race with a quotaoff so take the quotaoff lock.
- * (We don't hold an inode lock, so there's nothing else to stop
- * a quotaoff from happening). (XXXThis doesn't currently happen
- * because we take the vfslock before calling xfs_qm_sysent).
+ * We don't hold an inode lock, so there's nothing else to stop
+ * a quotaoff from happening.
*/
mutex_lock(&q->qi_quotaofflock);
/*
- * Get the dquot (locked), and join it to the transaction.
- * Allocate the dquot if this doesn't exist.
+ * Get the dquot (locked) before we start, as we need to do a
+ * transaction to allocate it if it doesn't exist. Once we have the
+ * dquot, unlock it so we can start the next transaction safely. We hold
+ * a reference to the dquot, so it's safe to do this unlock/lock without
+ * it being reclaimed in the meantime.
*/
- if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
- xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+ error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
+ if (error) {
ASSERT(error != ENOENT);
goto out_unlock;
}
+ xfs_dqunlock(dqp);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
+ error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
+ 0, 0, XFS_DEFAULT_LOG_COUNT);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out_rele;
+ }
+
+ xfs_dqlock(dqp);
xfs_trans_dqjoin(tp, dqp);
ddq = &dqp->q_core;
xfs_trans_log_dquot(tp, dqp);
error = xfs_trans_commit(tp, 0);
- xfs_qm_dqrele(dqp);
- out_unlock:
+out_rele:
+ xfs_qm_dqrele(dqp);
+out_unlock:
mutex_unlock(&q->qi_quotaofflock);
return error;
}
uuid_t dd_uuid; /* location information */
} xfs_dqblk_t;
+#define XFS_DQUOT_CRC_OFF offsetof(struct xfs_dqblk, dd_crc)
+
/*
* flags for q_flags field in the dquot.
*/
}
}
+ /*
+ * V5 filesystems always use attr2 format for attributes.
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+ xfs_warn(mp,
+"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
+ MNTOPT_NOATTR2, MNTOPT_ATTR2);
+ return XFS_ERROR(EINVAL);
+ }
+
/*
* mkfs'ed attr2 will turn on attr2 mount unless explicitly
* told by noattr2 to turn it off
struct xfs_mount *mp,
int pathlen)
{
- int fsblocks = 0;
- int len = pathlen;
+ int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
- do {
- fsblocks++;
- len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
- } while (len > 0);
-
- ASSERT(fsblocks <= XFS_SYMLINK_MAPS);
- return fsblocks;
+ return (pathlen + buflen - 1) / buflen;
}
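
Editor's note: a minimal sketch (not part of the patch; the 120-byte per-block payload is a made-up stand-in for XFS_SYMLINK_BUF_SPACE()) of the ceiling-division idiom the rewritten xfs_symlink_blocks() relies on.

/*
 * Editor's illustration only: ceil(pathlen / buflen) without a loop.
 * With a hypothetical 120-byte per-block payload, a 300-byte target
 * path needs (300 + 120 - 1) / 120 = 3 blocks.
 */
static int symlink_blocks_needed(int pathlen, int buflen)
{
        return (pathlen + buflen - 1) / buflen;
}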
static int
if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
fs_blocks = 0;
else
- fs_blocks = XFS_B_TO_FSB(mp, pathlen);
+ fs_blocks = xfs_symlink_blocks(mp, pathlen);
resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
cur_chunk = target_path;
offset = 0;
for (n = 0; n < nmaps; n++) {
- char *buf;
+ char *buf;
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
bp->b_ops = &xfs_symlink_buf_ops;
byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
- if (pathlen < byte_cnt) {
- byte_cnt = pathlen;
- }
+ byte_cnt = min(byte_cnt, pathlen);
buf = bp->b_addr;
buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
(char *)bp->b_addr);
}
+ ASSERT(pathlen == 0);
}
/*
xfs_mount_t *mp;
int nimap;
uint resblks;
- uint rounding;
+ xfs_off_t rounding;
int rt;
xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
inode_dio_wait(VFS_I(ip));
}
- rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+ rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
ioffset = offset & ~(rounding - 1);
error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
ioffset, -1);
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
-#ifdef CONFIG_PM
int acpi_bus_set_power(acpi_handle handle, int state);
const char *acpi_power_state_string(int state);
int acpi_device_get_power(struct acpi_device *device, int *state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
+
+#ifdef CONFIG_PM
bool acpi_bus_can_wakeup(acpi_handle handle);
-#else /* !CONFIG_PM */
-static inline int acpi_bus_set_power(acpi_handle handle, int state)
-{
- return 0;
-}
-static inline const char *acpi_power_state_string(int state)
-{
- return "D0";
-}
-static inline int acpi_device_get_power(struct acpi_device *device, int *state)
-{
- return 0;
-}
-static inline int acpi_device_set_power(struct acpi_device *device, int state)
-{
- return 0;
-}
-static inline int acpi_bus_init_power(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
- return 0;
-}
-static inline bool acpi_bus_power_manageable(acpi_handle handle)
-{
- return false;
-}
-static inline bool acpi_bus_can_wakeup(acpi_handle handle)
-{
- return false;
-}
-#endif /* !CONFIG_PM */
+#else
+static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
+#endif
#ifdef CONFIG_ACPI_PROC_EVENT
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
/*
* OSL Initialization and shutdown primitives
*/
-acpi_status __initdata acpi_os_initialize(void);
+acpi_status __init acpi_os_initialize(void);
acpi_status acpi_os_terminate(void);
int acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_suspend(struct device *dev);
-int acpi_processor_resume(struct device *dev);
extern struct cpuidle_driver acpi_idle_driver;
+#ifdef CONFIG_PM_SLEEP
+void acpi_processor_syscore_init(void);
+void acpi_processor_syscore_exit(void);
+#else
+static inline void acpi_processor_syscore_init(void) {}
+static inline void acpi_processor_syscore_exit(void) {}
+#endif
+
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT */
+#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr(p) p
+#endif
+#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr(p) __va(p)
+#endif
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
unsigned long start;
unsigned long end;
unsigned int need_flush : 1, /* Did free PTEs */
- fast_mode : 1; /* No batching */
-
/* we are in the middle of an operation to clear
* a full mm and can make some optimizations */
- unsigned int fullmm : 1,
+ fullmm : 1,
/* we have performed an operation which
* requires a complete flush of the tlb */
need_flush_all : 1;
#define HAVE_GENERIC_MMU_GATHER
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
- return tlb->fast_mode;
-#else
- /*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
- return 1;
-#endif
-}
-
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
int flags;
drm_ioctl_t *func;
unsigned int cmd_drv;
+ const char *name;
};
/**
*/
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
+ [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
struct drm_magic_entry {
struct list_head head;
/**
* struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
- * @gamma_set: - Set the given gamma lut register on the given crtc.
- * @gamma_get: - Read the given gamma lut register on the given crtc, used to
- * save the current lut when force-restoring the fbdev for e.g.
- * kdbg.
- * @fb_probe: - Driver callback to allocate and initialize the fbdev info
- * structure. Futhermore it also needs to allocate the drm
- * framebuffer used to back the fbdev.
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
*
* Driver callbacks used by the fbdev emulation helper library.
*/
/** Other copying of data from kernel space */
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
-/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size ) \
- (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
- __copy_from_user(arg1, arg2, arg3)
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
- __copy_to_user(arg1, arg2, arg3)
-#define DRM_GET_USER_UNCHECKED(val, uaddr) \
- __get_user(val, uaddr)
#define DRM_HZ HZ
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
* @dev: struct device of this controller
* @acpi_dma_xlate: callback function to find a suitable channel
* @data: private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line: last supported request line (CSRT)
*/
struct acpi_dma {
struct list_head dma_controllers;
struct dma_chan *(*acpi_dma_xlate)
(struct acpi_dma_spec *, struct acpi_dma *);
void *data;
+ unsigned short base_request_line;
+ unsigned short end_request_line;
};
/* Used with acpi_dma_simple_xlate() */
}
#endif
-extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
+extern void cper_print_aer(struct pci_dev *dev,
int cper_severity, struct aer_capability_regs *aer);
extern int cper_severity_to_aer(int cper_severity);
extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
- int severity);
+ int severity,
+ struct aer_capability_regs *aer_regs);
#endif //_AER_H_
#define BCMA_CORE_I2S 0x834
#define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */
#define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */
-#define BCMA_CORE_ARM_CR4 0x83e
+#define BCMA_CORE_PHY_AC 0x83B
+#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
+#define BCMA_CORE_USB30_DEV 0x83D
+#define BCMA_CORE_ARM_CR4 0x83E
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
#define PHY_BCM_FLAGS_VALID 0x80000000
+
+#endif /* _LINUX_BRCMPHY_H */
*
* If a subsystem synchronizes against the parent in its ->css_online() and
* before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_offline() is
+ * iteration, any descendant cgroup which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* In other words, the following guarantees that a descendant can't escape
extern void fb_set_suspend(struct fb_info *info, int state);
extern int fb_get_color_depth(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix);
-extern int fb_get_options(char *name, char **option);
+extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
extern struct fb_info *registered_fb[FB_MAX];
/*
* Journalling list for this buffer [jbd_lock_bh_state()]
+ * NOTE: We *cannot* combine this with b_modified into a bitfield
+ * as gcc would then (which the C standard allows but which is
+ * very unhelpful) make 64-bit accesses to the bitfield and clobber
+ * b_jcount if its update races with bitfield modification.
*/
- unsigned b_jlist:4;
+ unsigned b_jlist;
/*
* This flag signals the buffer has been modified by
* the currently running transaction
* [jbd_lock_bh_state()]
*/
- unsigned b_modified:1;
+ unsigned b_modified;
/*
* Copy of the buffer data frozen for writing to the log.
extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+
/**
* trace_puts - write a string into the ftrace buffer
* @str: the string to record
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
*/
-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt \
__attribute__((section("__trace_printk_fmt"))) = \
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
return kref_sub(kref, 1, release);
}
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put with one exception. If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count. The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+ void (*release)(struct kref *kref),
+ spinlock_t *lock)
+{
+ unsigned long flags;
+
+ WARN_ON(release == NULL);
+ if (atomic_add_unless(&kref->refcount, -1, 1))
+ return 0;
+ spin_lock_irqsave(lock, flags);
+ if (atomic_dec_and_test(&kref->refcount)) {
+ release(kref);
+ local_irq_restore(flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(lock, flags);
+ return 0;
+}
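
Editor's note: a minimal usage sketch (not part of the patch; the "foo" names are hypothetical) showing the locking contract documented above, where the release callback drops the lock with plain spin_unlock().

/* Editor's illustration only. */
struct foo {
        struct kref     refcount;
        spinlock_t      lock;
};

/* Called with f->lock held; must use spin_unlock(), not _irqrestore(). */
static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, refcount);

        spin_unlock(&f->lock);
        kfree(f);
}

static void foo_put(struct foo *f)
{
        kref_put_spinlock_irqsave(&f->refcount, foo_release, &f->lock);
}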
+
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
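Editor's note: a short usage sketch (not part of the patch; struct and field names are illustrative) of the new helper, which avoids a separate list_empty() check.

/* Editor's illustration only. */
struct request {
        struct list_head        queue_node;
};

/* Return the first queued request, or NULL if the queue is empty. */
static struct request *next_request(struct list_head *queue)
{
        return list_first_entry_or_null(queue, struct request, queue_node);
}
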
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
/**
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
- * @pm_power_off: Should machine pm power off hook be registered or not
* @init: board-specific initialization after detection of ab8500
* @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
int irq_base;
- bool pm_power_off;
void (*init) (struct ab8500 *);
struct ab8500_regulator_platform_data *regulator;
struct abx500_gpio_platform_data *gpio;
struct mlx4_qp_path {
u8 fl;
- u8 reserved1[1];
+ u8 vlan_control;
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
u8 sched_queue;
u8 vlan_index;
u8 feup;
- u8 reserved3;
+ u8 fvl_rx;
u8 reserved4[2];
u8 dmac[6];
};
+enum { /* fl */
+ MLX4_FL_CV = 1 << 6,
+ MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
+};
+enum { /* vlan_control */
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
+};
+
+enum { /* feup */
+ MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
+ MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
+ MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
+};
+
+enum { /* fvl_rx */
+ MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
+};
+
struct mlx4_qp_context {
__be32 flags;
__be32 pd;
u32 reserved5[10];
};
+enum { /* param3 */
+ MLX4_STRIP_VLAN = 1 << 30
+};
+
/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
}
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask);
+
+/* Allow TSO to be used on stacked devices:
+ * performing the GSO segmentation before the last device
+ * is a performance improvement.
+ */
+static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
+ netdev_features_t mask)
+{
+ return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+}
+
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
extern int ipv6_netfilter_init(void);
extern void ipv6_netfilter_fini(void);
+
+/*
+ * Hook functions for ipv6 to allow xt_* modules to be built-in even
+ * if IPv6 is a module.
+ */
+struct nf_ipv6_ops {
+ int (*chk_addr)(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
+};
+
+extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
+{
+ return rcu_dereference(nf_ipv6_ops);
+}
+
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
*
*/
-#ifdef CONFIG_OF_DEVICE
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+
+#ifdef CONFIG_OF_DEVICE
#include <linux/pm.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#if !defined(CONFIG_OF_ADDRESS)
struct of_dev_auxdata;
-struct device;
+struct device_node;
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
void acpiphp_init(void);
void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
void acpiphp_remove_slots(struct pci_bus *bus);
+void acpiphp_check_host_bridge(acpi_handle handle);
#else
static inline void acpiphp_init(void) { }
static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
acpi_handle handle) { }
static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
#endif
#else /* CONFIG_ACPI */
* if it is 0, pull-down is disabled.
* @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
* low, this is the most typical case and is typically achieved with two
- * active transistors on the output. Sending this config will enabale
+ * active transistors on the output. Setting this config will enable
* push-pull mode, the argument is ignored.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
- * which are then pulled up with an external resistor. Sending this
- * config will enabale open drain mode, the argument is ignored.
+ * which are then pulled up with an external resistor. Setting this
+ * config will enable open drain mode, the argument is ignored.
* @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
- * (open emitter). Sending this config will enabale open drain mode, the
+ * (open emitter). Setting this config will enable open drain mode, the
* argument is ignored.
- * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
- * argument. The argument is in mA.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
+ * passed as argument. The argument is in mA.
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
#ifndef __CLK_LPSS_H
#define __CLK_LPSS_H
+struct lpss_clk_data {
+ const char *name;
+ struct clk *clk;
+};
+
extern int lpt_clk_init(void);
#endif /* __CLK_LPSS_H */
int DTR_present;
int (*get_context_loss_count)(struct device *);
- void (*set_forceidle)(struct device *);
- void (*set_noidle)(struct device *);
void (*enable_wakeup)(struct device *, bool);
};
#include <stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
+#include <linux/linkage.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
+/**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
+ for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_nulls_node within the struct.
*
+ * The barrier() is needed to make sure the compiler doesn't cache the
+ * first element [1], as this loop can be restarted [2]
+ * [1] Documentation/atomic_ops.txt around line 114
+ * [2] Documentation/RCU/rculist_nulls.txt around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks cause tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
/**
* rcu_access_index() - fetch RCU index with no dereferencing
* @p: The index to read
extern struct bus_type rio_bus_type;
extern struct device rio_bus;
-extern struct list_head rio_devices; /* list of all devices */
struct rio_mport;
struct rio_dev;
* @name: Port name string
* @priv: Master port private data
* @dma: DMA device associated with mport
+ * @nscan: RapidIO network enumeration/discovery operations
*/
struct rio_mport {
struct list_head dbells; /* list of doorbell events */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_device dma;
#endif
+ struct rio_scan *nscan;
};
+/*
+ * Enumeration/discovery control flags
+ */
+#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */
+
struct rio_id_table {
u16 start; /* logical minimal id */
u32 max; /* max number of IDs in table */
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+/**
+ * struct rio_scan - RIO enumeration and discovery operations
+ * @enumerate: Callback to perform RapidIO fabric enumeration.
+ * @discover: Callback to perform RapidIO fabric discovery.
+ */
+struct rio_scan {
+ int (*enumerate)(struct rio_mport *mport, u32 flags);
+ int (*discover)(struct rio_mport *mport, u32 flags);
+};
+
/* Architecture and hardware-specific functions */
extern int rio_register_mport(struct rio_mport *);
extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
struct rio_dev *from);
+extern int rio_init_mports(void);
#endif /* LINUX_RIO_DRV_H */
SKB_GSO_CB(inner_skb)->mac_offset;
}
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+ int new_headroom, headroom;
+ int ret;
+
+ headroom = skb_headroom(skb);
+ ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ new_headroom = skb_headroom(skb);
+ SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+ return 0;
+}
+
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
-extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
unsigned int len, __wsum *csump);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
struct timespec;
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
+extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT
* when not using a GPIO line)
*
* A @spi_device is used to interchange data between an SPI slave
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- * number. Any individual value may be -EINVAL for CS lines that
+ * number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
*
* Each SPI master controller can communicate with one or more @spi_device
extern bool persistent_clock_exist;
-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
-#define has_persistent_clock() true
-#else
static inline bool has_persistent_clock(void)
{
return persistent_clock_exist;
}
-#endif
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
}
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
#endif
}
/**
- * gadget_is_superspeed() - return true if the hardware handles
- * supperspeed
- * @g: controller that might support supper speed
+ * gadget_is_superspeed() - return true if the hardware handles superspeed
+ * @g: controller that might support superspeed
*/
static inline int gadget_is_superspeed(struct usb_gadget *g)
{
struct usb_serial_port *port, struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
int (*chars_in_buffer)(struct tty_struct *tty);
+ void (*wait_until_sent)(struct tty_struct *tty, long timeout);
+ bool (*tx_empty)(struct usb_serial_port *port);
void (*throttle)(struct tty_struct *tty);
void (*unthrottle)(struct tty_struct *tty);
int (*tiocmget)(struct tty_struct *tty);
extern int usb_serial_generic_resume(struct usb_serial *serial);
extern int usb_serial_generic_write_room(struct tty_struct *tty);
extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
+ long timeout);
extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
extern void usb_serial_generic_throttle(struct tty_struct *tty);
int vc_allocate(unsigned int console);
int vc_cons_allocated(unsigned int console);
int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
-void vc_deallocate(unsigned int console);
+struct vc_data *vc_deallocate(unsigned int console);
void reset_palette(struct vc_data *vc);
void do_blank_screen(int entering_gfx);
void do_unblank_screen(int leaving_gfx);
if (!ret) \
break; \
} \
+ if (!ret && (condition)) \
+ ret = 1; \
finish_wait(&wq, &__wait); \
} while (0)
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
+ * The function returns 0 if the @timeout elapsed, or the remaining
+ * jiffies (at least 1) if the @condition evaluated to %true before
+ * the @timeout elapsed.
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
ret = -ERESTARTSYS; \
break; \
} \
+ if (!ret && (condition)) \
+ ret = 1; \
finish_wait(&wq, &__wait); \
} while (0)
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
+ * Returns:
+ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
+ * a signal, or the remaining jiffies (at least 1) if the @condition
+ * evaluated to %true before the @timeout elapsed.
*/
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
extern int ipv6_chk_addr(struct net *net,
const struct in6_addr *addr,
- struct net_device *dev,
+ const struct net_device *dev,
int strict);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
- * mixed for a single hardware.
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* In process context use instead ieee80211_rx_ni().
*
* (internally defers to a tasklet.)
*
* Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
- * be mixed for a single hardware.
+ * be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
* (internally disables bottom halves).
*
* Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
- * not be mixed for a single hardware.
+ * not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls
* to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
- * may not be mixed for a single hardware.
+ * may not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_rx() or ieee80211_rx_ni().
*
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
} u;
};
-typedef void nf_logfn(u_int8_t pf,
+typedef void nf_logfn(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
#define _KER_NFNETLINK_LOG_H
void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
#endif
struct psched_ratecfg {
- u64 rate_bps;
- u32 mult;
- u32 shift;
+ u64 rate_bps;
+ u32 mult;
+ u16 overhead;
+ u8 shift;
};
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
- return ((u64)len * r->mult) >> r->shift;
+ return ((u64)(len + r->overhead) * r->mult) >> r->shift;
}
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);
+extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
-static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
+static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
+ const struct psched_ratecfg *r)
{
- return r->rate_bps >> 3;
+ memset(res, 0, sizeof(*res));
+ res->rate = r->rate_bps >> 3;
+ res->overhead = r->overhead;
}
#endif
struct raw_hashinfo;
struct module;
+/*
+ * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
+ * nodes unmodified. Special care is taken when initializing the object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+}
+
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
}
}
+extern void xfrm_garbage_collect(struct net *net);
+
#else
static inline void xfrm_sk_free_policy(struct sock *sk) {}
{
return 1;
}
+static inline void xfrm_garbage_collect(struct net *net)
+{
+}
#endif
static __inline__
#define CMD_T_ABORTED (1 << 0)
#define CMD_T_ACTIVE (1 << 1)
#define CMD_T_COMPLETE (1 << 2)
-#define CMD_T_QUEUED (1 << 3)
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_FAILED (1 << 6)
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
+ struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
struct kref sess_kref;
};
bool def_pr_registered;
/* See transport_lunflags_table */
u32 lun_flags;
- u32 deve_cmds;
u32 mapped_lun;
- u32 average_bytes;
- u32 last_byte_count;
u32 total_cmds;
- u32 total_bytes;
u64 pr_res_key;
u64 creation_time;
u32 attach_count;
void target_execute_cmd(struct se_cmd *cmd);
-void transport_generic_free_cmd(struct se_cmd *, int);
+int transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
int target_put_sess_cmd(struct se_session *, struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
-void target_wait_for_sess_cmds(struct se_session *, int);
+void target_wait_for_sess_cmds(struct se_session *);
int core_alua_check_nonop_delay(struct se_cmd *);
__entry->lblk, __entry->len)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_enter,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
TP_ARGS(inode, lblk),
(unsigned long) __entry->ino, __entry->lblk)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
TP_PROTO(struct inode *inode, struct extent_status *es),
TP_ARGS(inode, es),
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
+#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
struct virtio_console_config {
/* columns of the screens */
#ifndef __OMAP_PANEL_DATA_H
#define __OMAP_PANEL_DATA_H
+#include <video/omapdss.h>
+#include <video/display_timing.h>
+
struct omap_dss_device;
/**
int nreset_gpio;
};
+/**
+ * encoder_tfp410 platform data
+ * @name: name for this display entity
+ * @power_down_gpio: gpio number for PD pin (or -1 if not available)
+ * @data_lines: number of DPI datalines
+ */
+struct encoder_tfp410_platform_data {
+ const char *name;
+ const char *source;
+ int power_down_gpio;
+ int data_lines;
+};
+
+/**
+ * encoder_tpd12s015 platform data
+ * @name: name for this display entity
+ * @ct_cp_hpd_gpio: CT_CP_HPD gpio number
+ * @ls_oe_gpio: LS_OE gpio number
+ * @hpd_gpio: HPD gpio number
+ */
+struct encoder_tpd12s015_platform_data {
+ const char *name;
+ const char *source;
+
+ int ct_cp_hpd_gpio;
+ int ls_oe_gpio;
+ int hpd_gpio;
+};
+
+/**
+ * connector_dvi platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @i2c_bus_num: i2c bus number to be used for reading EDID
+ */
+struct connector_dvi_platform_data {
+ const char *name;
+ const char *source;
+ int i2c_bus_num;
+};
+
+/**
+ * connector_hdmi platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ */
+struct connector_hdmi_platform_data {
+ const char *name;
+ const char *source;
+};
+
+/**
+ * connector_atv platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @connector_type: composite/svideo
+ * @invert_polarity: invert signal polarity
+ */
+struct connector_atv_platform_data {
+ const char *name;
+ const char *source;
+
+ enum omap_dss_venc_type connector_type;
+ bool invert_polarity;
+};
+
+/**
+ * panel_dpi platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ * @display_timing: timings for this panel
+ * @backlight_gpio: gpio to enable/disable the backlight (or -1)
+ * @enable_gpio: gpio to enable/disable the panel (or -1)
+ */
+struct panel_dpi_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+
+ const struct display_timing *display_timing;
+
+ int backlight_gpio;
+ int enable_gpio;
+};
+
+/**
+ * panel_dsicm platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @reset_gpio: gpio to reset the panel (or -1)
+ * @use_ext_te: use external TE GPIO
+ * @ext_te_gpio: external TE GPIO
+ * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
+ * @use_dsi_backlight: true if panel uses DSI command to control backlight
+ * @pin_config: DSI pin configuration
+ */
+struct panel_dsicm_platform_data {
+ const char *name;
+ const char *source;
+
+ int reset_gpio;
+
+ bool use_ext_te;
+ int ext_te_gpio;
+
+ unsigned ulps_timeout;
+
+ bool use_dsi_backlight;
+
+ struct omap_dsi_pin_config pin_config;
+};
+
+/**
+ * panel_acx565akm platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @reset_gpio: gpio to reset the panel (or -1)
+ * @datapairs: number of SDI datapairs
+ */
+struct panel_acx565akm_platform_data {
+ const char *name;
+ const char *source;
+
+ int reset_gpio;
+
+ int datapairs;
+};
+
+/**
+ * panel_lb035q02 platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ * @backlight_gpio: gpio to enable/disable the backlight (or -1)
+ * @enable_gpio: gpio to enable/disable the panel (or -1)
+ */
+struct panel_lb035q02_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+
+ int backlight_gpio;
+ int enable_gpio;
+};
+
+/**
+ * panel_sharp_ls037v7dw01 platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ * @resb_gpio: reset signal GPIO
+ * @ini_gpio: power on control GPIO
+ * @mo_gpio: selection for resolution(VGA/QVGA) GPIO
+ * @lr_gpio: selection for horizontal scanning direction GPIO
+ * @ud_gpio: selection for vertical scanning direction GPIO
+ */
+struct panel_sharp_ls037v7dw01_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+
+ int resb_gpio;
+ int ini_gpio;
+ int mo_gpio;
+ int lr_gpio;
+ int ud_gpio;
+};
+
+/**
+ * panel-tpo-td043mtea1 platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ * @nreset_gpio: reset signal
+ */
+struct panel_tpo_td043mtea1_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+
+ int nreset_gpio;
+};
+
+/**
+ * panel-nec-nl8048hl11 platform data
+ * @name: name for this display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ * @res_gpio: reset signal
+ * @qvga_gpio: selection for resolution(QVGA/WVGA)
+ */
+struct panel_nec_nl8048hl11_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+
+ int res_gpio;
+ int qvga_gpio;
+};
+
#endif /* __OMAP_PANEL_DATA_H */
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <video/videomode.h>
+
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
OMAP_DISPLAY_TYPE_DSI = 1 << 3,
OMAP_DISPLAY_TYPE_VENC = 1 << 4,
OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
+ OMAP_DISPLAY_TYPE_DVI = 1 << 6,
};
enum omap_plane {
OMAP_DSS_AUDIO_PLAYING,
};
+struct omap_dss_audio {
+ struct snd_aes_iec958 *iec;
+ struct snd_cea_861_aud_if *cea;
+};
+
enum omap_dss_rotation_type {
OMAP_DSS_ROT_DMA = 1 << 0,
OMAP_DSS_ROT_VRFB = 1 << 1,
int num_devices;
struct omap_dss_device **devices;
struct omap_dss_device *default_device;
+ const char *default_display_name;
int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
int (*set_min_bus_tput)(struct device *dev, unsigned long r);
enum omap_dss_output_id supported_outputs;
/* dynamic fields */
- struct omap_dss_output *output;
+ struct omap_dss_device *output;
/*
* The following functions do not block:
*/
int (*set_output)(struct omap_overlay_manager *mgr,
- struct omap_dss_output *output);
+ struct omap_dss_device *output);
int (*unset_output)(struct omap_overlay_manager *mgr);
int (*set_manager_info)(struct omap_overlay_manager *mgr,
u8 pre_mult_alpha;
};
-struct omap_dss_output {
- struct list_head list;
+struct omapdss_dpi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
- const char *name;
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
- /* display type supported by the output */
- enum omap_display_type type;
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
- /* DISPC channel for this output */
- enum omap_channel dispc_channel;
+ void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
- /* output instance */
- enum omap_dss_output_id id;
+struct omapdss_sdi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
- /* output's platform device pointer */
- struct platform_device *pdev;
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
- /* dynamic fields */
- struct omap_overlay_manager *manager;
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
- struct omap_dss_device *device;
+ void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_type)(struct omap_dss_device *dssdev,
+ enum omap_dss_venc_type type);
+ void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+ bool invert_polarity);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ /*
+ * Note: These functions might sleep. Do not call while
+ * holding a spinlock/readlock.
+ */
+ int (*audio_enable)(struct omap_dss_device *dssdev);
+ void (*audio_disable)(struct omap_dss_device *dssdev);
+ bool (*audio_supported)(struct omap_dss_device *dssdev);
+ int (*audio_config)(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio);
+ /* Note: These functions may not sleep */
+ int (*audio_start)(struct omap_dss_device *dssdev);
+ void (*audio_stop)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_dsi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+ bool enter_ulps);
+
+ /* bus configuration */
+ int (*set_config)(struct omap_dss_device *dssdev,
+ const struct omap_dss_dsi_config *cfg);
+ int (*configure_pins)(struct omap_dss_device *dssdev,
+ const struct omap_dsi_pin_config *pin_cfg);
+
+ void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+ bool enable);
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*update)(struct omap_dss_device *dssdev, int channel,
+ void (*callback)(int, void *), void *data);
+
+ void (*bus_lock)(struct omap_dss_device *dssdev);
+ void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+ int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+ void (*disable_video_output)(struct omap_dss_device *dssdev,
+ int channel);
+
+ int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+ int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+ int vc_id);
+ void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+ /* data transfer */
+ int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+ u8 *data, int len);
+
+ int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+ u8 *reqdata, int reqlen,
+ u8 *data, int len);
+
+ int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+ int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+ int channel, u16 plen);
};
struct omap_dss_device {
- struct device dev;
+ /* old device, to be removed */
+ struct device old_dev;
+
+ /* new device, pointer to panel device */
+ struct device *dev;
+
+ struct module *owner;
+
+ struct list_head panel_list;
+
+ /* alias in the form of "display%d" */
+ char alias[16];
enum omap_display_type type;
+ enum omap_display_type output_type;
/* obsolete, to be removed */
enum omap_channel channel;
struct {
int module;
-
- bool ext_te;
- u8 ext_te_gpio;
} dsi;
struct {
struct rfbi_timings rfbi_timings;
} ctrl;
- int reset_gpio;
-
- int max_backlight_level;
-
const char *name;
/* used to match device to driver */
struct omap_dss_driver *driver;
+ union {
+ const struct omapdss_dpi_ops *dpi;
+ const struct omapdss_sdi_ops *sdi;
+ const struct omapdss_dvi_ops *dvi;
+ const struct omapdss_hdmi_ops *hdmi;
+ const struct omapdss_atv_ops *atv;
+ const struct omapdss_dsi_ops *dsi;
+ } ops;
+
/* helper variable for driver suspend/resume */
bool activate_after_resume;
enum omap_display_caps caps;
- struct omap_dss_output *output;
+ struct omap_dss_device *output;
enum omap_dss_display_state state;
enum omap_dss_audio_state audio_state;
- /* platform specific */
- int (*platform_enable)(struct omap_dss_device *dssdev);
- void (*platform_disable)(struct omap_dss_device *dssdev);
- int (*set_backlight)(struct omap_dss_device *dssdev, int level);
- int (*get_backlight)(struct omap_dss_device *dssdev);
+ /* OMAP DSS output specific fields */
+
+ struct list_head list;
+
+ /* DISPC channel for this output */
+ enum omap_channel dispc_channel;
+
+ /* output instance */
+ enum omap_dss_output_id id;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+
+ struct omap_dss_device *device;
};
struct omap_dss_hdmi_data
int hpd_gpio;
};
-struct omap_dss_audio {
- struct snd_aes_iec958 *iec;
- struct snd_cea_861_aud_if *cea;
-};
-
struct omap_dss_driver {
struct device_driver driver;
int (*probe)(struct omap_dss_device *);
void (*remove)(struct omap_dss_device *);
+ int (*connect)(struct omap_dss_device *dssdev);
+ void (*disconnect)(struct omap_dss_device *dssdev);
+
int (*enable)(struct omap_dss_device *display);
void (*disable)(struct omap_dss_device *display);
int (*run_test)(struct omap_dss_device *display, int test);
};
enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
int omap_dss_register_driver(struct omap_dss_driver *);
void omap_dss_unregister_driver(struct omap_dss_driver *);
-void omap_dss_get_device(struct omap_dss_device *dssdev);
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
void omap_dss_put_device(struct omap_dss_device *dssdev);
#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-int omap_dss_start_device(struct omap_dss_device *dssdev);
-void omap_dss_stop_device(struct omap_dss_device *dssdev);
+void videomode_to_omap_video_timings(const struct videomode *vm,
+ struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+ struct videomode *vm);
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
int omap_dss_get_num_overlays(void);
struct omap_overlay *omap_dss_get_overlay(int num);
-struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id);
-int omapdss_output_set_device(struct omap_dss_output *out,
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node);
+int omapdss_output_set_device(struct omap_dss_device *out,
struct omap_dss_device *dssdev);
-int omapdss_output_unset_device(struct omap_dss_output *out);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
bool mem_to_mem);
#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
-#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
+#define to_dss_device(x) container_of((x), struct omap_dss_device, old_dev)
void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable);
void omapdss_compat_uninit(void);
struct dss_mgr_ops {
+ int (*connect)(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst);
+
void (*start_update)(struct omap_overlay_manager *mgr);
int (*enable)(struct omap_overlay_manager *mgr);
void (*disable)(struct omap_overlay_manager *mgr);
int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
void dss_uninstall_mgr_ops(void);
+int dss_mgr_connect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst);
+void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dst);
void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
const struct omap_video_timings *timings);
void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
void (*handler)(void *), void *data);
void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
void (*handler)(void *), void *data);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->output;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+ return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
#endif
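The per-connector ops tables above replace the old platform_enable/platform_disable hooks: a display driver now resolves its video source and calls through the matching ops union. A minimal sketch of how a DVI connector driver might use struct omapdss_dvi_ops, assuming the header above is included; example_dvi_enable() and its locals are illustrative only, not part of the patch:

static int example_dvi_enable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *in = omapdss_find_output_from_display(dssdev);
	struct omap_video_timings timings = { 0 };	/* filled in by the driver */
	int r;

	if (!omapdss_device_is_connected(dssdev))
		return -ENODEV;

	r = in->ops.dvi->check_timings(in, &timings);
	if (r)
		return r;
	in->ops.dvi->set_timings(in, &timings);

	r = in->ops.dvi->enable(in);
	if (r)
		return r;

	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
	return 0;
}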
struct device dev;
enum xenbus_state state;
struct completion down;
+ struct work_struct work;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
int otime, struct list_head *pt)
{
int i;
+ int progress;
- if (sma->complex_count || sops == NULL) {
- if (update_queue(sma, -1, pt))
+ progress = 1;
+retry_global:
+ if (sma->complex_count) {
+ if (update_queue(sma, -1, pt)) {
+ progress = 1;
otime = 1;
+ sops = NULL;
+ }
}
+ if (!progress)
+ goto done;
if (!sops) {
/* No semops; something special is going on. */
for (i = 0; i < sma->sem_nsems; i++) {
- if (update_queue(sma, i, pt))
+ if (update_queue(sma, i, pt)) {
otime = 1;
+ progress = 1;
+ }
}
- goto done;
+ goto done_checkretry;
}
/* Check the semaphores that were modified. */
if (sops[i].sem_op > 0 ||
(sops[i].sem_op < 0 &&
sma->sem_base[sops[i].sem_num].semval == 0))
- if (update_queue(sma, sops[i].sem_num, pt))
+ if (update_queue(sma, sops[i].sem_num, pt)) {
otime = 1;
+ progress = 1;
+ }
+ }
+done_checkretry:
+ if (progress) {
+ progress = 0;
+ goto retry_global;
}
done:
if (otime)
* @seq: netlink audit message sequence (serial) number
* @data: payload data
* @datasz: size of payload data
- * @loginuid: loginuid of sender
- * @sessionid: sessionid for netlink audit message
- * @sid: SE Linux Security ID of sender
*/
int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
{
*/
cgroup_drop_root(opts.new_root);
- if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) &&
- root->flags != opts.flags) {
- pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
- ret = -EINVAL;
- goto drop_new_super;
+ if (root->flags != opts.flags) {
+ if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
+ pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
+ ret = -EINVAL;
+ goto drop_new_super;
+ } else {
+ pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
+ }
}
/* no subsys rebinding, so refcounts don't change */
goto out;
}
+ cfe->type = (void *)cft;
+ cfe->dentry = dentry;
+ dentry->d_fsdata = cfe;
+ simple_xattrs_init(&cfe->xattrs);
+
mode = cgroup_file_mode(cft);
error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
if (!error) {
- cfe->type = (void *)cft;
- cfe->dentry = dentry;
- dentry->d_fsdata = cfe;
- simple_xattrs_init(&cfe->xattrs);
list_add_tail(&cfe->node, &parent->files);
cfe = NULL;
}
WARN_ON_ONCE(!rcu_read_lock_held());
/* if first iteration, pretend we just visited @cgroup */
- if (!pos) {
- if (list_empty(&cgroup->children))
- return NULL;
+ if (!pos)
pos = cgroup;
- }
/* visit the first child if exists */
next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
return next;
/* no child, visit my or the closest ancestor's next sibling */
- do {
+ while (pos != cgroup) {
next = list_entry_rcu(pos->sibling.next, struct cgroup,
sibling);
if (&next->sibling != &pos->parent->children)
return next;
pos = pos->parent;
- } while (pos != cgroup);
+ }
return NULL;
}
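Assuming this hunk is the pre-order descendant iterator (cgroup_next_descendant_pre()), the rewrite lets callers seed the walk with pos == NULL instead of special-casing an empty children list. A minimal usage sketch, not part of the patch; the walk must run under rcu_read_lock():

static void example_visit_descendants(struct cgroup *root)
{
	struct cgroup *pos = NULL;

	rcu_read_lock();
	/* visits every descendant of @root in pre-order, excluding @root itself */
	while ((pos = cgroup_next_descendant_pre(pos, root)))
		pr_info("visiting descendant cgroup %p\n", pos);
	rcu_read_unlock();
}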
static inline int cpu_idle_poll(void)
{
+ rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
while (!need_resched())
cpu_relax();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+ rcu_idle_exit();
return 1;
}
perf_output_end(&handle);
}
+typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
+typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+
+static void
+perf_event_aux_ctx(struct perf_event_context *ctx,
+ perf_event_aux_match_cb match,
+ perf_event_aux_output_cb output,
+ void *data)
+{
+ struct perf_event *event;
+
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
+ continue;
+ if (!event_filter_match(event))
+ continue;
+ if (match(event, data))
+ output(event, data);
+ }
+}
+
+static void
+perf_event_aux(perf_event_aux_match_cb match,
+ perf_event_aux_output_cb output,
+ void *data,
+ struct perf_event_context *task_ctx)
+{
+ struct perf_cpu_context *cpuctx;
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int ctxn;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+ if (task_ctx)
+ goto next;
+ ctxn = pmu->task_ctx_nr;
+ if (ctxn < 0)
+ goto next;
+ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+ if (ctx)
+ perf_event_aux_ctx(ctx, match, output, data);
+next:
+ put_cpu_ptr(pmu->pmu_cpu_context);
+ }
+
+ if (task_ctx) {
+ preempt_disable();
+ perf_event_aux_ctx(task_ctx, match, output, data);
+ preempt_enable();
+ }
+ rcu_read_unlock();
+}
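perf_event_aux() and perf_event_aux_ctx() factor out the "walk every context, filter, emit" loop that the task, comm and mmap paths below each used to open-code; a side-band record now only supplies a match callback and an output callback. A hypothetical sketch of wiring a new record type through the helper (perf_example_event and these callbacks are illustrative, not part of the patch):

struct perf_example_event {
	struct task_struct *task;
	/* header and payload would follow, as in the other side-band records */
};

static int perf_event_example_match(struct perf_event *event, void *data)
{
	/* deliver only to events that requested, say, comm records */
	return event->attr.comm;
}

static void perf_event_example_output(struct perf_event *event, void *data)
{
	struct perf_example_event *example_event = data;

	/* build a perf_output_handle and write the record here */
	(void)example_event;
}

static void perf_event_example(struct task_struct *task)
{
	struct perf_example_event example_event = { .task = task };

	perf_event_aux(perf_event_example_match,
		       perf_event_example_output,
		       &example_event,
		       NULL);
}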
+
/*
* task tracking -- fork/exit
*
};
static void perf_event_task_output(struct perf_event *event,
- struct perf_task_event *task_event)
+ void *data)
{
+ struct perf_task_event *task_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
struct task_struct *task = task_event->task;
task_event->event_id.header.size = size;
}
-static int perf_event_task_match(struct perf_event *event)
-{
- if (event->state < PERF_EVENT_STATE_INACTIVE)
- return 0;
-
- if (!event_filter_match(event))
- return 0;
-
- if (event->attr.comm || event->attr.mmap ||
- event->attr.mmap_data || event->attr.task)
- return 1;
-
- return 0;
-}
-
-static void perf_event_task_ctx(struct perf_event_context *ctx,
- struct perf_task_event *task_event)
+static int perf_event_task_match(struct perf_event *event,
+ void *data __maybe_unused)
{
- struct perf_event *event;
-
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (perf_event_task_match(event))
- perf_event_task_output(event, task_event);
- }
-}
-
-static void perf_event_task_event(struct perf_task_event *task_event)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int ctxn;
-
- rcu_read_lock();
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->unique_pmu != pmu)
- goto next;
- perf_event_task_ctx(&cpuctx->ctx, task_event);
-
- ctx = task_event->task_ctx;
- if (!ctx) {
- ctxn = pmu->task_ctx_nr;
- if (ctxn < 0)
- goto next;
- ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
- if (ctx)
- perf_event_task_ctx(ctx, task_event);
- }
-next:
- put_cpu_ptr(pmu->pmu_cpu_context);
- }
- if (task_event->task_ctx)
- perf_event_task_ctx(task_event->task_ctx, task_event);
-
- rcu_read_unlock();
+ return event->attr.comm || event->attr.mmap ||
+ event->attr.mmap_data || event->attr.task;
}
static void perf_event_task(struct task_struct *task,
},
};
- perf_event_task_event(&task_event);
+ perf_event_aux(perf_event_task_match,
+ perf_event_task_output,
+ &task_event,
+ task_ctx);
}
void perf_event_fork(struct task_struct *task)
};
static void perf_event_comm_output(struct perf_event *event,
- struct perf_comm_event *comm_event)
+ void *data)
{
+ struct perf_comm_event *comm_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = comm_event->event_id.header.size;
comm_event->event_id.header.size = size;
}
-static int perf_event_comm_match(struct perf_event *event)
-{
- if (event->state < PERF_EVENT_STATE_INACTIVE)
- return 0;
-
- if (!event_filter_match(event))
- return 0;
-
- if (event->attr.comm)
- return 1;
-
- return 0;
-}
-
-static void perf_event_comm_ctx(struct perf_event_context *ctx,
- struct perf_comm_event *comm_event)
+static int perf_event_comm_match(struct perf_event *event,
+ void *data __maybe_unused)
{
- struct perf_event *event;
-
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (perf_event_comm_match(event))
- perf_event_comm_output(event, comm_event);
- }
+ return event->attr.comm;
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
char comm[TASK_COMM_LEN];
unsigned int size;
- struct pmu *pmu;
- int ctxn;
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
comm_event->comm_size = size;
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
- rcu_read_lock();
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->unique_pmu != pmu)
- goto next;
- perf_event_comm_ctx(&cpuctx->ctx, comm_event);
- ctxn = pmu->task_ctx_nr;
- if (ctxn < 0)
- goto next;
-
- ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
- if (ctx)
- perf_event_comm_ctx(ctx, comm_event);
-next:
- put_cpu_ptr(pmu->pmu_cpu_context);
- }
- rcu_read_unlock();
+ perf_event_aux(perf_event_comm_match,
+ perf_event_comm_output,
+ comm_event,
+ NULL);
}
void perf_event_comm(struct task_struct *task)
};
static void perf_event_mmap_output(struct perf_event *event,
- struct perf_mmap_event *mmap_event)
+ void *data)
{
+ struct perf_mmap_event *mmap_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
}
static int perf_event_mmap_match(struct perf_event *event,
- struct perf_mmap_event *mmap_event,
- int executable)
-{
- if (event->state < PERF_EVENT_STATE_INACTIVE)
- return 0;
-
- if (!event_filter_match(event))
- return 0;
-
- if ((!executable && event->attr.mmap_data) ||
- (executable && event->attr.mmap))
- return 1;
-
- return 0;
-}
-
-static void perf_event_mmap_ctx(struct perf_event_context *ctx,
- struct perf_mmap_event *mmap_event,
- int executable)
+ void *data)
{
- struct perf_event *event;
+ struct perf_mmap_event *mmap_event = data;
+ struct vm_area_struct *vma = mmap_event->vma;
+ int executable = vma->vm_flags & VM_EXEC;
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (perf_event_mmap_match(event, mmap_event, executable))
- perf_event_mmap_output(event, mmap_event);
- }
+ return (!executable && event->attr.mmap_data) ||
+ (executable && event->attr.mmap);
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
unsigned int size;
char tmp[16];
char *buf = NULL;
const char *name;
- struct pmu *pmu;
- int ctxn;
memset(tmp, 0, sizeof(tmp));
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
- rcu_read_lock();
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->unique_pmu != pmu)
- goto next;
- perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
- vma->vm_flags & VM_EXEC);
-
- ctxn = pmu->task_ctx_nr;
- if (ctxn < 0)
- goto next;
-
- ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
- if (ctx) {
- perf_event_mmap_ctx(ctx, mmap_event,
- vma->vm_flags & VM_EXEC);
- }
-next:
- put_cpu_ptr(pmu->pmu_cpu_context);
- }
- rcu_read_unlock();
+ perf_event_aux(perf_event_mmap_match,
+ perf_event_mmap_output,
+ mmap_event,
+ NULL);
kfree(buf);
}
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
- * @first_irq: first number of irq block assigned to the domain
+ * @first_irq: first number of irq block assigned to the domain,
+ * pass zero to assign irqs on-the-fly. This will result in a
+ * linear IRQ domain so it is important to use irq_create_mapping()
+ * for each used IRQ, especially when SPARSE_IRQ is enabled.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
/* A linear domain is the default */
return irq_domain_add_linear(of_node, size, ops, host_data);
}
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
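Following the updated kernel-doc, passing first_irq == 0 yields a linear domain, so each hwirq that will actually be used has to be mapped explicitly. A minimal sketch, assuming a hypothetical controller with 32 interrupts; my_intc_ops, my_intc_probe and the hwirq number are illustrative only:

static const struct irq_domain_ops my_intc_ops = {
	/* .map/.xlate omitted in this sketch */
};

static int my_intc_probe(struct device_node *node)
{
	struct irq_domain *domain;
	unsigned int virq;

	domain = irq_domain_add_simple(node, 32, 0, &my_intc_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* with SPARSE_IRQ, every hwirq in use needs an explicit mapping */
	virq = irq_create_mapping(domain, 5);
	if (!virq)
		return -EINVAL;

	return 0;
}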
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
while (count--) {
int irq = irq_base + count;
struct irq_data *irq_data = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irq_data->hwirq;
+ irq_hw_number_t hwirq;
if (WARN_ON(!irq_data || irq_data->domain != domain))
continue;
+ hwirq = irq_data->hwirq;
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
int retval = 0;
helper_lock();
+ if (!sub_info->path) {
+ retval = -EINVAL;
+ goto out;
+ }
+
if (sub_info->path[0] == '\0')
goto out;
kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
for (i = 1; i < info->hdr->e_shnum; i++) {
- const char *name = info->secstrings + info->sechdrs[i].sh_name;
- if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
- continue;
- if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
+ /* Scan all writable sections that are not executable */
+ if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
+ !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
+ (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
continue;
kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
mod->trace_events = section_objs(info, "_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
- /*
- * This section contains pointers to allocated objects in the trace
- * code and not scanning it leads to false positives.
- */
- kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
- mod->num_trace_events, GFP_KERNEL);
#endif
#ifdef CONFIG_TRACING
mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
sizeof(*mod->trace_bprintk_fmt_start),
&mod->num_trace_bprintk_fmt);
- /*
- * This section contains pointers to allocated objects in the trace
- * code and not scanning it leads to false positives.
- */
- kmemleak_scan_area(mod->trace_bprintk_fmt_start,
- sizeof(*mod->trace_bprintk_fmt_start) *
- mod->num_trace_bprintk_fmt, GFP_KERNEL);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
final_start = min(range[i].start, start);
final_end = max(range[i].end, end);
- range[i].start = final_start;
- range[i].end = final_end;
- return nr_range;
+ /* clear it and add it back for further merge */
+ range[i].start = 0;
+ range[i].end = 0;
+ return add_range_with_merge(range, az, nr_range,
+ final_start, final_end);
}
/* Need to add it: */
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
if (!have_rcu_nocb_mask) {
- alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+ zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
have_rcu_nocb_mask = true;
}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
rdtp->last_accelerate = jiffies;
/* Request timer delay depending on laziness, and round. */
- if (rdtp->all_lazy) {
+ if (!rdtp->all_lazy) {
*dj = round_up(rcu_idle_gp_delay + jiffies,
rcu_idle_gp_delay) - jiffies;
} else {
config ARCH_CLOCKSOURCE_DATA
bool
-# Platforms has a persistent clock
-config ALWAYS_USE_PERSISTENT_CLOCK
- bool
- default n
-
# Timekeeping vsyscall support
config GENERIC_TIME_VSYSCALL
bool
void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
struct pps_normtime pts_norm, freq_norm;
- unsigned long flags;
pts_norm = pps_normalize_ts(*phase_ts);
}
}
+ /*
+ * Remove the current cpu from the pending mask. The event is
+ * delivered immediately in tick_do_broadcast()!
+ */
+ cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
+
/* Take care of enforced broadcast requests */
cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
cpumask_clear(tick_broadcast_force_mask);
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
- WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
+ WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
/*
* We only reprogram the broadcast timer if we
void __init tick_broadcast_init(void)
{
- alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
- alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
- alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
- alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
- alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
if (unlikely(!cpu_online(cpu))) {
if (cpu == tick_do_timer_cpu)
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ return false;
}
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
hrtimer_cancel(&ts->sched_timer);
# endif
- ts->nohz_mode = NOHZ_MODE_INACTIVE;
+ memset(ts, 0, sizeof(*ts));
}
#endif
read_persistent_clock(&timekeeping_suspend_time);
+ /*
+ * On some systems the persistent_clock cannot be detected at
+ * timekeeping_init by its return value, so if we see a valid
+ * value returned, update the persistent_clock_exists flag.
+ */
+ if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
+ persistent_clock_exist = true;
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&timekeeper_seq);
timekeeping_forward_now(tk);
boot_done = 1;
base = &boot_tvec_bases;
}
+ spin_lock_init(&base->lock);
tvec_base_done[cpu] = 1;
} else {
base = per_cpu(tvec_bases, cpu);
}
- spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
/*
* Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
* concurrent insertions into the ftrace_global_list.
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
#define do_for_each_ftrace_op(op, list) \
- op = rcu_dereference_raw(list); \
+ op = rcu_dereference_raw_notrace(list); \
do
/*
* Optimized for just a single item in the list (as that is the normal case).
*/
#define while_for_each_ftrace_op(op) \
- while (likely(op = rcu_dereference_raw((op)->next)) && \
+ while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
unlikely((op) != &ftrace_list_end))
static inline void ftrace_ops_init(struct ftrace_ops *ops)
if (hlist_empty(hhd))
return NULL;
- hlist_for_each_entry_rcu(rec, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
hhd = &hash->buckets[key];
- hlist_for_each_entry_rcu(entry, hhd, hlist) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
struct ftrace_hash *notrace_hash;
int ret;
- filter_hash = rcu_dereference_raw(ops->filter_hash);
- notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+ filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
if ((ftrace_hash_empty(filter_hash) ||
ftrace_lookup_ip(filter_hash, ip)) &&
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
- hlist_for_each_entry_rcu(entry, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -EINVAL;
+
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
}
memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
- max_data->uid = task_uid(tsk);
+ /*
+ * If tsk == current, then use current_uid(), as that does not use
+ * RCU. The irq tracer can be called outside of RCU scope.
+ */
+ if (tsk == current)
+ max_data->uid = current_uid();
+ else
+ max_data->uid = task_uid(tsk);
+
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
max_data->policy = tsk->policy;
max_data->rt_priority = tsk->rt_priority;
trace_init_cmdlines();
- register_tracer(&nop_trace);
-
+ /*
+ * register_tracer() might reference current_trace, so it
+ * needs to be set before we register anything. This is
+ * just a bootstrap of current_trace anyway.
+ */
global_trace.current_trace = &nop_trace;
+ register_tracer(&nop_trace);
+
/* All seems OK, enable tracing */
tracing_disabled = 0;
out_reg:
/* Don't let event modules unload while probe registered */
ret = try_module_get(file->event_call->mod);
- if (!ret)
+ if (!ret) {
+ ret = -EBUSY;
goto out_free;
+ }
ret = __ftrace_event_enable_disable(file, 1, 1);
if (ret < 0)
static void __free_preds(struct event_filter *filter)
{
+ int i;
+
if (filter->preds) {
+ for (i = 0; i < filter->n_preds; i++)
+ kfree(filter->preds[i].ops);
kfree(filter->preds);
filter->preds = NULL;
}
const char *symbol; /* symbol name */
struct ftrace_event_class class;
struct ftrace_event_call call;
- struct ftrace_event_file **files;
+ struct ftrace_event_file * __rcu *files;
ssize_t size; /* trace entry size */
unsigned int nr_args;
struct probe_arg args[];
static int trace_probe_nr_files(struct trace_probe *tp)
{
- struct ftrace_event_file **file = tp->files;
+ struct ftrace_event_file **file;
int ret = 0;
+ /*
+ * Since all updates to tp->files are protected by probe_enable_lock,
+ * we don't need to take rcu_read_lock here.
+ */
+ file = rcu_dereference_raw(tp->files);
if (file)
while (*(file++))
ret++;
mutex_lock(&probe_enable_lock);
if (file) {
- struct ftrace_event_file **new, **old = tp->files;
+ struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp);
+ old = rcu_dereference_raw(tp->files);
/* 1 is for new one and 1 is for stopper */
new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
GFP_KERNEL);
static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{
+ struct ftrace_event_file **files;
int i;
- if (tp->files) {
- for (i = 0; tp->files[i]; i++)
- if (tp->files[i] == file)
+ /*
+ * Since all updates to tp->files are protected by probe_enable_lock,
+ * we don't need to take rcu_read_lock here.
+ */
+ files = rcu_dereference_raw(tp->files);
+ if (files) {
+ for (i = 0; files[i]; i++)
+ if (files[i] == file)
return i;
}
mutex_lock(&probe_enable_lock);
if (file) {
- struct ftrace_event_file **new, **old = tp->files;
+ struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp);
int i, j;
+ old = rcu_dereference_raw(tp->files);
if (n == 0 || trace_probe_file_index(tp, file) < 0) {
ret = -EINVAL;
goto out_unlock;
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
- struct ftrace_event_file **file = tp->files;
+ /*
+ * Note: preempt is already disabled around the kprobe handler.
+ * However, we still need an smp_read_barrier_depends() corresponding
+ * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+ */
+ struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+ if (unlikely(!file))
+ return;
- /* Note: preempt is already disabled around the kprobe handler */
while (*file) {
__kprobe_trace_func(tp, regs, *file);
file++;
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
- struct ftrace_event_file **file = tp->files;
+ /*
+ * Note: preempt is already disabled around the kprobe handler.
+ * However, we still need an smp_read_barrier_depends() corresponding
+ * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+ */
+ struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+ if (unlikely(!file))
+ return;
- /* Note: preempt is already disabled around the kprobe handler */
while (*file) {
__kretprobe_trace_func(tp, ri, regs, *file);
file++;
}
/* Event entry printers */
-enum print_line_t
+static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
return TRACE_TYPE_PARTIAL_LINE;
}
-enum print_line_t
+static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
trace->reset(tr);
tracing_start();
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
local_irq_restore(flags);
return ret;
}
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
void delayed_work_timer_fn(unsigned long __data)
{
local_irq_restore(flags);
return ret;
}
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
/**
* mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
spin_unlock_irq(&pool->lock);
mutex_lock(&pool->manager_mutex);
+ spin_lock_irq(&pool->lock);
ret = true;
}
* no synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
+ * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
+ * Note that both per-cpu and unbound workqueues may be associated with
+ * multiple pool_workqueues which have separate congested states. A
+ * workqueue being congested on one CPU doesn't mean the workqueue is also
+ * congested on other CPUs / NUMA nodes.
+ *
* RETURNS:
* %true if congested, %false otherwise.
*/
rcu_read_lock_sched();
+ if (cpu == WORK_CPU_UNBOUND)
+ cpu = smp_processor_id();
+
if (!(wq->flags & WQ_UNBOUND))
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
else
BUG_ON(!tbl);
for_each_node(node)
- BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+ BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+ node_online(node) ? node : NUMA_NO_NODE));
for_each_possible_cpu(cpu) {
node = cpu_to_node(cpu);
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
- gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
--- /dev/null
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/uio.h>
+
+/*
+ * Copy iovec to kernel. Returns -EFAULT on error.
+ *
+ * Note: this modifies the original iovec.
+ */
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
+{
+ while (len > 0) {
+ if (iov->iov_len) {
+ int copy = min_t(unsigned int, len, iov->iov_len);
+ if (copy_from_user(kdata, iov->iov_base, copy))
+ return -EFAULT;
+ len -= copy;
+ kdata += copy;
+ iov->iov_base += copy;
+ iov->iov_len -= copy;
+ }
+ iov++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovec);
+
+/*
+ * Copy kernel to iovec. Returns -EFAULT on error.
+ *
+ * Note: this modifies the original iovec.
+ */
+
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
+{
+ while (len > 0) {
+ if (iov->iov_len) {
+ int copy = min_t(unsigned int, iov->iov_len, len);
+ if (copy_to_user(iov->iov_base, kdata, copy))
+ return -EFAULT;
+ kdata += copy;
+ len -= copy;
+ iov->iov_len -= copy;
+ iov->iov_base += copy;
+ }
+ iov++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovec);
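Both helpers consume the iovec as they copy (iov_base and iov_len are advanced in place), so a caller that needs the original description again must work on a scratch copy. A minimal sketch, not taken from the patch; example_echo_header() assumes <linux/uio.h>, <linux/string.h> and <linux/errno.h>, and that the iovec describes at least sizeof(header) bytes:

static int example_echo_header(const struct iovec *user_iov, int nr_segs)
{
	struct iovec iov_copy[UIO_FASTIOV];
	unsigned char header[16];

	if (nr_segs <= 0 || nr_segs > UIO_FASTIOV)
		return -EINVAL;

	/* pull the header out of a private copy, leaving user_iov intact */
	memcpy(iov_copy, user_iov, nr_segs * sizeof(*user_iov));
	if (memcpy_fromiovec(header, iov_copy, sizeof(header)))
		return -EFAULT;

	/* the copy was consumed above, so rebuild it before writing back */
	memcpy(iov_copy, user_iov, nr_segs * sizeof(*user_iov));
	return memcpy_toiovec(iov_copy, header, sizeof(header));
}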
if (waiter->node != n)
continue;
+ list_del(&waiter->list);
waiter->woken = 1;
mb();
wake_up_process(waiter->process);
- list_del(&waiter->list);
}
spin_unlock(&klist_remove_lock);
knode_set_klist(n, NULL);
"rM" ((USItype)(bh)), \
"rM" ((USItype)(al)), \
"rM" ((USItype)(bl)))
-#if defined(_PA_RISC1_1)
+#if 0 && defined(_PA_RISC1_1)
+/* xmpyu uses a floating point register, which is not allowed in the Linux kernel. */
#define umul_ppmm(wh, wl, u, v) \
do { \
union {UDItype __ll; \
#define UMUL_TIME 40
#define UDIV_TIME 80
#endif
-#ifndef LONGLONG_STANDALONE
+#if 0 /* #ifndef LONGLONG_STANDALONE */
#define udiv_qrnnd(q, r, n1, n0, d) \
do { USItype __r; \
(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
pte_unmap(pte);
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
- set_pmd_at(mm, address, pmd, _pmd);
+ /*
+ * We can only use set_pmd_at when establishing
+ * hugepmds and never for establishing regular pmds that
+ * point to regular pagetables. Use pmd_populate for that
+ */
+ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
spin_unlock(&mm->page_table_lock);
anon_vma_unlock_write(vma->anon_vma);
goto out;
if (mem_cgroup_disabled())
return NULL;
- VM_BUG_ON(PageSwapCache(page));
-
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
if (page_mapped(page))
return;
VM_BUG_ON(page->mapping && !PageAnon(page));
+ /*
+ * If the page is in swap cache, uncharge should be deferred
+ * to the swap path, which also properly accounts swap usage
+ * and handles memcg lifetime.
+ *
+ * Note that this check is not stable and reclaim may add the
+ * page to swap cache at any time after this. However, if the
+ * page is not in swap cache by the time page->mapcount hits
+ * 0, there won't be any page table references to the swap
+ * slot, and reclaim will free it and not actually write the
+ * page to disk.
+ */
if (PageSwapCache(page))
return;
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
tlb->start = -1UL;
tlb->end = 0;
tlb->need_flush = 0;
- tlb->fast_mode = (num_possible_cpus() == 1);
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb_table_flush(tlb);
#endif
- if (tlb_fast_mode(tlb))
- return;
-
for (batch = &tlb->local; batch; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
VM_BUG_ON(!tlb->need_flush);
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu() */
- }
-
batch = tlb->active;
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
start = phys_start_pfn << PAGE_SHIFT;
size = nr_pages * PAGE_SIZE;
ret = release_mem_region_adjustable(&iomem_resource, start, size);
- if (ret)
- pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n",
- start, start + size - 1, ret);
+ if (ret) {
+ resource_size_t endres = start + size - 1;
+
+ pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
+ &start, &endres, ret);
+ }
sections_to_remove = nr_pages / PAGES_PER_SECTION;
for (i = 0; i < sections_to_remove; i++) {
pte = arch_make_huge_pte(pte, vma, new, 0);
}
#endif
- flush_cache_page(vma, addr, pte_pfn(pte));
+ flush_dcache_page(new);
set_pte_at(mm, addr, ptep, pte);
if (PageHuge(new)) {
int id;
/*
- * srcu_read_lock() here will block synchronize_srcu() in
- * mmu_notifier_unregister() until all registered
- * ->release() callouts this function makes have
- * returned.
+ * SRCU here will block mmu_notifier_unregister until
+ * ->release returns.
*/
id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+ /*
+ * If ->release runs before mmu_notifier_unregister it must be
+ * handled, as it's the only way for the driver to flush all
+ * existing sptes and stop the driver from establishing any more
+ * sptes before all the pages in the mm are freed.
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ srcu_read_unlock(&srcu, id);
+
spin_lock(&mm->mmu_notifier_mm->lock);
while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
mn = hlist_entry(mm->mmu_notifier_mm->list.first,
struct mmu_notifier,
hlist);
-
/*
- * Unlink. This will prevent mmu_notifier_unregister()
- * from also making the ->release() callout.
+ * We arrived before mmu_notifier_unregister so
+ * mmu_notifier_unregister will do nothing other than to wait
+ * for ->release to finish and for mmu_notifier_unregister to
+ * return.
*/
hlist_del_init_rcu(&mn->hlist);
- spin_unlock(&mm->mmu_notifier_mm->lock);
-
- /*
- * Clear sptes. (see 'release' description in mmu_notifier.h)
- */
- if (mn->ops->release)
- mn->ops->release(mn, mm);
-
- spin_lock(&mm->mmu_notifier_mm->lock);
}
spin_unlock(&mm->mmu_notifier_mm->lock);
/*
- * All callouts to ->release() which we have done are complete.
- * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
- */
- srcu_read_unlock(&srcu, id);
-
- /*
- * mmu_notifier_unregister() may have unlinked a notifier and may
- * still be calling out to it. Additionally, other notifiers
- * may have been active via vmtruncate() et. al. Block here
- * to ensure that all notifier callouts for this mm have been
- * completed and the sptes are really cleaned up before returning
- * to exit_mmap().
+ * synchronize_srcu here prevents mmu_notifier_release from returning to
+ * exit_mmap (which would proceed with freeing all pages in the mm)
+ * until the ->release method returns, if it was invoked by
+ * mmu_notifier_unregister.
+ *
+ * The mmu_notifier_mm can't go away from under us because one mm_count
+ * is held by exit_mmap.
*/
synchronize_srcu(&srcu);
}
{
BUG_ON(atomic_read(&mm->mm_count) <= 0);
- spin_lock(&mm->mmu_notifier_mm->lock);
if (!hlist_unhashed(&mn->hlist)) {
+ /*
+ * SRCU here will force exit_mmap to wait for ->release to
+ * finish before freeing the pages.
+ */
int id;
+ id = srcu_read_lock(&srcu);
/*
- * Ensure we synchronize up with __mmu_notifier_release().
+ * exit_mmap will block in mmu_notifier_release to guarantee
+ * that ->release is called before freeing the pages.
*/
- id = srcu_read_lock(&srcu);
-
- hlist_del_rcu(&mn->hlist);
- spin_unlock(&mm->mmu_notifier_mm->lock);
-
if (mn->ops->release)
mn->ops->release(mn, mm);
+ srcu_read_unlock(&srcu, id);
+ spin_lock(&mm->mmu_notifier_mm->lock);
/*
- * Allow __mmu_notifier_release() to complete.
+ * Cannot use list_del_rcu() since __mmu_notifier_release
+ * can delete it before we hold the lock.
*/
- srcu_read_unlock(&srcu, id);
- } else
+ hlist_del_init_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
+ }
/*
- * Wait for any running method to finish, including ->release() if it
- * was run by __mmu_notifier_release() instead of us.
+ * Wait for any running method to finish, of course including
+ * ->release if it was run by mmu_notifier_release instead of us.
*/
synchronize_srcu(&srcu);
for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
if (poison)
memset((void *)pos, poison, PAGE_SIZE);
- free_reserved_page(virt_to_page(pos));
+ free_reserved_page(virt_to_page((void *)pos));
}
if (pages && s)
return 0;
}
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
- struct vm_area_struct *vma;
-
- /* We don't need vma lookup at all. */
- if (!walk->hugetlb_entry)
- return NULL;
-
- VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
- vma = find_vma(walk->mm, addr);
- if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
- return vma;
-
- return NULL;
-}
-
#else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
- return NULL;
-}
-
static int walk_hugetlb_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
if (!walk->mm)
return -EINVAL;
+ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
pgd = pgd_offset(walk->mm, addr);
do {
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = NULL;
next = pgd_addr_end(addr, end);
/*
- * handle hugetlb vma individually because pagetable walk for
- * the hugetlb page is dependent on the architecture and
- * we can't handled it in the same manner as non-huge pages.
+ * This function was not intended to be vma based.
+ * But there are vma special cases to be handled:
+ * - hugetlb VMAs
+ * - VM_PFNMAP VMAs
*/
- vma = hugetlb_vma(addr, walk);
+ vma = find_vma(walk->mm, addr);
if (vma) {
- if (vma->vm_end < next)
+ /*
+ * There are no page structures backing a VM_PFNMAP
+ * range, so do not allow split_huge_page_pmd().
+ */
+ if ((vma->vm_start <= addr) &&
+ (vma->vm_flags & VM_PFNMAP)) {
next = vma->vm_end;
+ pgd = pgd_offset(walk->mm, next);
+ continue;
+ }
/*
- * Hugepage is very tightly coupled with vma, so
- * walk through hugetlb entries within a given vma.
+ * Handle hugetlb vma individually because pagetable
+ * walk for the hugetlb page is dependent on the
+ * architecture and we can't handle it in the same
+ * manner as non-huge pages.
*/
- err = walk_hugetlb_range(vma, addr, next, walk);
- if (err)
- break;
- pgd = pgd_offset(walk->mm, next);
- continue;
+ if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+ is_vm_hugetlb_page(vma)) {
+ if (vma->vm_end < next)
+ next = vma->vm_end;
+ /*
+ * Hugepage is very tightly coupled with vma,
+ * so walk through hugetlb entries within a
+ * given vma.
+ */
+ err = walk_hugetlb_range(vma, addr, next, walk);
+ if (err)
+ break;
+ pgd = pgd_offset(walk->mm, next);
+ continue;
+ }
}
if (pgd_none_or_clear_bad(pgd)) {
*/
del_timer_sync(&app->join_timer);
- spin_lock(&app->lock);
+ spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
- spin_unlock(&app->lock);
+ spin_unlock_bh(&app->lock);
mrp_queue_xmit(app);
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
if (dat_entry) {
+ /* If the ARP request is destined for a local client the local
+ * client will answer itself. DAT would only generate a
+ * duplicate packet.
+ *
+ * Moreover, if the soft-interface is enslaved into a bridge, an
+ * additional DAT answer may trigger kernel warnings about
+ * a packet coming from the wrong port.
+ */
+ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+ ret = true;
+ goto out;
+ }
+
skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
bat_priv->soft_iface, ip_dst, hw_src,
dat_entry->mac_addr, hw_src);
batadv_vis_quit(bat_priv);
batadv_gw_node_purge(bat_priv);
- batadv_originator_free(bat_priv);
batadv_nc_free(bat_priv);
+ batadv_dat_free(bat_priv);
+ batadv_bla_free(bat_priv);
+ /* Free the TT and the originator tables only after having terminated
+ * all the other dependent components which may use these structures for
+ * their purposes.
+ */
batadv_tt_free(bat_priv);
- batadv_bla_free(bat_priv);
-
- batadv_dat_free(bat_priv);
+ /* Since the originator table cleanup routine also accesses the TT
+ * tables, it has to be invoked after the TT tables have been
+ * freed and marked as empty. This ensures that no cleanup RCU callbacks
+ * accessing the TT data are scheduled for later execution.
+ */
+ batadv_originator_free(bat_priv);
free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
char *algo_name = (char *)val;
size_t name_len = strlen(algo_name);
- if (algo_name[name_len - 1] == '\n')
+ if (name_len > 0 && algo_name[name_len - 1] == '\n')
algo_name[name_len - 1] = '\0';
bat_algo_ops = batadv_algo_get(algo_name);
struct ethhdr *ethhdr, ethhdr_tmp;
uint8_t *orig_dest, ttl, ttvn;
unsigned int coding_len;
+ int err;
/* Save headers temporarily */
memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
coding_len);
/* Resize decoded skb if decoded with larger packet */
- if (nc_packet->skb->len > coding_len + h_size)
- pskb_trim_rcsum(skb, coding_len + h_size);
+ if (nc_packet->skb->len > coding_len + h_size) {
+ err = pskb_trim_rcsum(skb, coding_len + h_size);
+ if (err)
+ return NULL;
+ }
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
kfree(orig_node);
}
+/**
+ * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+ * schedule an rcu callback for freeing it
+ * @orig_node: the orig node to free
+ */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
+/**
+ * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+ * possibly free it (without rcu callback)
+ * @orig_node: the orig node to free
+ */
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+{
+ if (atomic_dec_and_test(&orig_node->refcount))
+ batadv_orig_node_free_rcu(&orig_node->rcu);
+}
+
void batadv_originator_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
void batadv_originator_free(struct batadv_priv *bat_priv);
void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
const uint8_t *addr);
struct batadv_neigh_node *
batadv_debugfs_del_meshif(dev);
free_bat_counters:
free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
return ret;
}
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
- batadv_orig_node_free_ref(orig_entry->orig_node);
+
+ /* We are in an rcu callback here, therefore we cannot use
+ * batadv_orig_node_free_ref() and its call_rcu():
+ * An rcu_barrier() wouldn't wait for that to finish
+ */
+ batadv_orig_node_free_ref_now(orig_entry->orig_node);
kfree(orig_entry);
}
}
static void
-ebt_log_packet(u_int8_t pf, unsigned int hooknum,
- const struct sk_buff *skb, const struct net_device *in,
- const struct net_device *out, const struct nf_loginfo *loginfo,
- const char *prefix)
+ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
+ const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const struct nf_loginfo *loginfo,
+ const char *prefix)
{
unsigned int bitmask;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
par->in, par->out, &li, "%s", info->prefix);
else
- ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in,
+ ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return EBT_CONTINUE;
}
return skb;
}
-static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- const struct ebt_ulog_info *uloginfo, const char *prefix)
+static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct ebt_ulog_info *uloginfo,
+ const char *prefix)
{
ebt_ulog_packet_msg_t *pm;
size_t size, copy_len;
struct nlmsghdr *nlh;
- struct net *net = dev_net(in ? in : out);
struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
unsigned int group = uloginfo->nlgroup;
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
}
/* this function is registered with the netfilter core */
-static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
+static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct nf_loginfo *li,
const char *prefix)
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
- ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+ ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static unsigned int
ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
+ struct net *net = dev_net(par->in ? par->in : par->out);
+
+ ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return EBT_CONTINUE;
}
mutex_lock(&osdc->request_mutex);
if (req->r_linger) {
__unregister_linger_request(osdc, req);
+ req->r_linger = 0;
ceph_osdc_put_request(req);
}
mutex_unlock(&osdc->request_mutex);
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
__register_request(osdc, req);
- WARN_ON(req->r_sent);
+ req->r_sent = 0;
+ req->r_got_reply = 0;
+ req->r_completed = 0;
rc = __map_request(osdc, req, 0);
if (rc < 0) {
if (nofail) {
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
int datagrams;
struct timespec ktspec;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (COMPAT_USE_64BIT_TIME)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT,
ha->refcount = 1;
ha->global_use = global;
ha->synced = sync;
+ ha->sync_cnt = 0;
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
}
if (sync) {
if (ha->synced)
- return 0;
+ return -EEXIST;
else
ha->synced = true;
}
err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
false, true);
- if (err)
+ if (err && err != -EEXIST)
return err;
- ha->sync_cnt++;
- ha->refcount++;
+
+ if (!err) {
+ ha->sync_cnt++;
+ ha->refcount++;
+ }
return 0;
}
if (err)
return;
ha->sync_cnt--;
- __hw_addr_del_entry(from_list, ha, false, true);
+ /* address on from list is not marked synced */
+ __hw_addr_del_entry(from_list, ha, false, false);
}
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
return -EINVAL;
netif_addr_lock_nested(to);
- err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
return err;
}
-/*
- * Copy kernel to iovec. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, iov->iov_len, len);
- if (copy_to_user(iov->iov_base, kdata, copy))
- return -EFAULT;
- kdata += copy;
- len -= copy;
- iov->iov_len -= copy;
- iov->iov_base += copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
/*
* Copy kernel to iovec. Returns -EFAULT on error.
*/
}
EXPORT_SYMBOL(memcpy_toiovecend);
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, len, iov->iov_len);
- if (copy_from_user(kdata, iov->iov_base, copy))
- return -EFAULT;
- len -= copy;
- kdata += copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
/*
* Copy iovec from kernel. Returns -EFAULT on error.
*/
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = NULL;
+ skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
static void skb_release_all(struct sk_buff *skb)
{
skb_release_head_state(skb);
- if (likely(skb->data))
+ if (likely(skb->head))
skb_release_data(skb);
}
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
- "sk_lock-AF_NFC" , "sk_lock-AF_MAX"
+ "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
- "slock-AF_NFC" , "slock-AF_MAX"
+ "slock-AF_NFC" , "slock-AF_VSOCK" , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
- "clock-AF_NFC" , "clock-AF_MAX"
+ "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
};
/*
#endif
}
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
- if (offsetof(struct sock, sk_node.next) != 0)
- memset(sk, 0, offsetof(struct sock, sk_node.next));
- memset(&sk->sk_node.pprev, 0,
- size - offsetof(struct sock, sk_node.pprev));
-}
-
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
unsigned long nulls1, nulls2;
*/
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn;
- const struct iphdr *iph = (const struct iphdr *)skb->data;
+ const struct iphdr *iph;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
else
itn = net_generic(net, ipgre_net_id);
+ iph = (const struct iphdr *)skb->data;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
iph->daddr, iph->saddr, tpi.key);
EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
-__inline__ void ip_send_check(struct iphdr *iph)
+void ip_send_check(struct iphdr *iph)
{
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
/* Push down and install the IP header. */
skb_push(skb, sizeof(struct iphdr));
return skb;
}
-static void ipt_ulog_packet(unsigned int hooknum,
+static void ipt_ulog_packet(struct net *net,
+ unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
size_t size, copy_len;
struct nlmsghdr *nlh;
struct timeval tv;
- struct net *net = dev_net(in ? in : out);
struct ulog_net *ulog = ulog_pernet(net);
/* ffs == find first bit set, necessary because userspace
put_unaligned(tv.tv_usec, &pm->timestamp_usec);
put_unaligned(skb->mark, &pm->mark);
pm->hook = hooknum;
- if (prefix != NULL)
- strncpy(pm->prefix, prefix, sizeof(pm->prefix));
+ if (prefix != NULL) {
+ strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
+ pm->prefix[sizeof(pm->prefix) - 1] = '\0';
+ }
else if (loginfo->prefix[0] != '\0')
strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
else
static unsigned int
ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
+ struct net *net = dev_net(par->in ? par->in : par->out);
+
+ ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return XT_CONTINUE;
}
-static void ipt_logfn(u_int8_t pf,
+static void ipt_logfn(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
- ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+ ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static int ulog_tg_check(const struct xt_tgchk_param *par)
{
struct rtable *rt;
struct flowi4 fl4;
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ int oif = skb->dev->ifindex;
+ u8 tos = RT_TOS(iph->tos);
+ u8 prot = iph->protocol;
+ u32 mark = skb->mark;
rt = (struct rtable *) dst;
- ip_rt_build_flow_key(&fl4, sk, skb);
+ __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
unsigned int mss;
struct sk_buff *gso_skb = skb;
__sum16 newcheck;
+ bool ooo_okay, copy_destructor;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
goto out;
}
+ copy_destructor = gso_skb->destructor == tcp_wfree;
+ ooo_okay = gso_skb->ooo_okay;
+ /* All segments but the first should have ooo_okay cleared */
+ skb->ooo_okay = 0;
+
segs = skb_segment(skb, features);
if (IS_ERR(segs))
goto out;
+ /* Only first segment might have ooo_okay set */
+ segs->ooo_okay = ooo_okay;
+
delta = htonl(oldlen + (thlen + mss));
skb = segs;
thlen, skb->csum));
seq += mss;
+ if (copy_destructor) {
+ skb->destructor = gso_skb->destructor;
+ skb->sk = gso_skb->sk;
+ /* {tcp|sock}_wfree() use exact truesize accounting:
+ * sum(skb->truesize) MUST be exactly gso_skb->truesize
+ * So we account mss bytes of 'true size' for each segment.
+ * The last segment will contain the remaining.
+ */
+ skb->truesize = mss;
+ gso_skb->truesize -= mss;
+ }
skb = skb->next;
th = tcp_hdr(skb);
* is freed at TX completion, and not right now when gso_skb
* is freed by GSO engine
*/
- if (gso_skb->destructor == tcp_wfree) {
+ if (copy_destructor) {
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
swap(gso_skb->truesize, skb->truesize);
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
- struct page *page = skb_frag_page(f);
- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+ unsigned int offset = f->page_offset;
+ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
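+ /* A fragment may sit in a compound (order > 0) page, so
+ * f->page_offset can exceed PAGE_SIZE. sg_set_page() expects an
+ * offset within the page it is given, so point sg at the exact
+ * page holding the data and keep only the in-page remainder.
+ */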
+ sg_set_page(&sg, page, skb_frag_size(f),
+ offset_in_page(offset));
if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int prior_sacked, bool is_dupack,
- int flag)
+ int prior_sacked, int prior_packets,
+ bool is_dupack, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
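+ /* prior_packets is now sampled before tcp_clean_rtx_queue(), so
+ * prior_packets - tp->packets_out counts packets newly cumulatively
+ * acked; adding (tp->sacked_out - prior_sacked) gives the packets
+ * newly acked or SACKed by this ACK.
+ */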
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack);
if (is_dupack)
tcp_add_reno_sack(sk);
}
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets;
- int prior_packets;
+ int prior_packets = tp->packets_out;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
+ int previous_packets_out = 0;
/* If the ack is older than previous acks
* then we can probably ignore it.
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
tp->rcv_tstamp = tcp_time_stamp;
- prior_packets = tp->packets_out;
if (!prior_packets)
goto no_queue;
/* See if we can take anything off of the retransmit queue. */
+ previous_packets_out = tp->packets_out;
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
- pkts_acked = prior_packets - tp->packets_out;
+ pkts_acked = previous_packets_out - tp->packets_out;
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
} else {
if (flag & FLAG_DATA_ACKED)
tcp_cong_avoid(sk, ack, prior_in_flight);
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
}
SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0) {
+ if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- skb->ooo_okay = 1;
- } else
- skb->ooo_okay = 0;
+
+ /* if no packet is in qdisc/device queue, then allow XPS to select
+ * another queue.
+ */
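+ /* sk_wmem_alloc stays charged until the skb destructors
+ * (tcp_wfree()/sock_wfree()) run at TX completion, so it only
+ * reads as zero when nothing from this socket is still sitting
+ * in a qdisc or driver queue.
+ */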
+ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
}
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev, int strict)
+ const struct net_device *dev, int strict)
{
struct inet6_ifaddr *ifp;
unsigned int hash = inet6_addr_hash(addr);
sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
/* Failure cases are ignored */
- if (!IS_ERR(sp_rt))
+ if (!IS_ERR(sp_rt)) {
+ sp_ifa->rt = sp_rt;
ip6_ins_rt(sp_rt);
+ }
}
read_unlock_bh(&idev->lock);
}
}
if (t == NULL)
t = netdev_priv(dev);
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
if (t) {
err = 0;
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
if (WARN_ON(np->cork.opt))
return -EINVAL;
- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
if (unlikely(np->cork.opt == NULL))
return -ENOBUFS;
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
+#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
return csum;
};
+static const struct nf_ipv6_ops ipv6ops = {
+ .chk_addr = ipv6_chk_addr,
+};
+
static const struct nf_afinfo nf_ip6_afinfo = {
.family = AF_INET6,
.checksum = nf_ip6_checksum,
int __init ipv6_netfilter_init(void)
{
+ RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
return nf_register_afinfo(&nf_ip6_afinfo);
}
*/
void ipv6_netfilter_fini(void)
{
+ RCU_INIT_POINTER(nf_ipv6_ops, NULL);
nf_unregister_afinfo(&nf_ip6_afinfo);
}
SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
- SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
SNMP_MIB_SENTINEL
};
}
#endif
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
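+ /* Together with sk_prot_clear_nulls() above this zeroes the whole
+ * socket except sk_node.next (preserved for SLAB_DESTROY_BY_RCU
+ * lookups) and pinet6 itself.
+ */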
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
struct proto tcpv6_prot = {
.name = "TCPv6",
.owner = THIS_MODULE,
#ifdef CONFIG_MEMCG_KMEM
.proto_cgroup = tcp_proto_cgroup,
#endif
+ .clear_sk = tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
}
#endif /* CONFIG_PROC_FS */
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
extern void udpv6_destroy_sock(struct sock *sk);
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
#ifdef CONFIG_PROC_FS
extern int udp6_seq_show(struct seq_file *seq, void *v);
#endif
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
- u8 *mac_start, *prevhdr;
+ u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
+ int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
- if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
- pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
- goto out;
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
- unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
- unfrag_ip6hlen;
- mac_start = skb_mac_header(skb);
- memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udplite6_protosw = {
dev_hold(dev);
xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
- if (!xdst->u.rt6.rt6i_idev)
+ if (!xdst->u.rt6.rt6i_idev) {
+ dev_put(dev);
return -ENODEV;
+ }
rt6_transfer_peer(&xdst->u.rt6, rt);
/*
* We now have some discovery info to deliver!
*/
- discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
+ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
if (!discovery) {
IRDA_WARNING("%s: unable to malloc!\n", __func__);
return;
out:
xfrm_pol_put(xp);
+ if (err == 0)
+ xfrm_garbage_collect(net);
return err;
}
out:
xfrm_pol_put(xp);
+ if (delete && err == 0)
+ xfrm_garbage_collect(net);
return err;
}
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
__le16 fc, bool acked);
+void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
return 0;
}
-static int ieee80211_verify_mac(struct ieee80211_local *local, u8 *addr)
+static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *iter;
u64 new, mask, tmp;
u8 *m;
int ret = 0;
mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ list_for_each_entry(iter, &local->interfaces, list) {
+ if (iter == sdata)
+ continue;
+
+ if (iter->vif.type == NL80211_IFTYPE_MONITOR)
continue;
- m = sdata->vif.addr;
+ m = iter->vif.addr;
tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
if (ieee80211_sdata_running(sdata))
return -EBUSY;
- ret = ieee80211_verify_mac(sdata->local, sa->sa_data);
+ ret = ieee80211_verify_mac(sdata, sa->sa_data);
if (ret)
return ret;
master->control_port_protocol;
sdata->control_port_no_encrypt =
master->control_port_no_encrypt;
+ sdata->vif.cab_queue = master->vif.cab_queue;
+ memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
+ sizeof(sdata->vif.hw_queue));
break;
}
case NL80211_IFTYPE_AP:
ieee80211_recalc_ps(local, -1);
- if (dev) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ /* XXX: for AP_VLAN, actually track AP queues */
+ netif_tx_start_all_queues(dev);
+ } else if (dev) {
unsigned long flags;
int n_acs = IEEE80211_NUM_ACS;
int ac;
break;
}
+ /*
+ * Pick address of existing interface in case user changed
+ * MAC address manually, default to perm_addr.
+ */
m = local->hw.wiphy->perm_addr;
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ continue;
+ m = sdata->vif.addr;
+ break;
+ }
start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
ASSERT_RTNL();
+ /*
+ * Close all AP_VLAN interfaces first, as otherwise they
+ * might be closed while the AP interface they belong to
+ * is closed, causing unregister_netdevice_many() to crash.
+ */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ dev_close(sdata->dev);
+
mutex_lock(&local->iflist_mtx);
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
- u64 timestamp, struct ieee802_11_elems *elems)
+ u64 timestamp, struct ieee802_11_elems *elems,
+ bool beacon)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_chan_def new_vht_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+ const struct ieee80211_ht_operation *ht_oper;
int secondary_channel_offset = -1;
ASSERT_MGD_MTX(ifmgd);
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
+ ht_oper = elems->ht_operation;
if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_40MHZ)) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
+ /* only used for bandwidth here */
+ ht_oper = NULL;
}
if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
return;
}
- if (sec_chan_offs) {
+ if (!beacon && sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
+ } else if (beacon && ht_oper) {
+ secondary_channel_offset =
+ ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
} else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- /* if HT is enabled and the IE not present, it's still HT */
+ /*
+ * If it's not a beacon, HT is enabled and the IE not present,
+ * it's 20 MHz, 802.11-2012 8.5.2.6:
+ * This element [the Secondary Channel Offset Element] is
+ * present when switching to a 40 MHz channel. It may be
+ * present when switching to a 20 MHz channel (in which
+ * case the secondary channel offset is set to SCN).
+ */
secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
}
mutex_unlock(&local->iflist_mtx);
}
- ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems);
+ ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
+ elems, true);
}
ieee80211_sta_process_chanswitch(sdata,
rx_status->mactime,
- &elems);
+ &elems, false);
} else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
ieee80211_sta_process_chanswitch(sdata,
rx_status->mactime,
- &elems);
+ &elems, false);
}
break;
}
if (WARN_ON_ONCE(!auth_data))
return -EINVAL;
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
- IEEE80211_TX_INTFL_MLME_CONN_TX;
-
auth_data->tries++;
if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
auth_data->expected_transaction = trans;
}
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_INTFL_MLME_CONN_TX;
+
ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
auth_data->data, auth_data->data_len,
auth_data->bss->bssid,
* will not answer to direct packet in unassociated state.
*/
ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
- NULL, 0, (u32) -1, true, tx_flags,
+ NULL, 0, (u32) -1, true, 0,
auth_data->bss->channel, false);
rcu_read_unlock();
}
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ if (tx_flags == 0) {
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
ifmgd->auth_data->timeout_started = true;
run_again(ifmgd, auth_data->timeout);
}
}
+#ifdef CONFIG_PM
+void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ mutex_lock(&ifmgd->mtx);
+ if (!ifmgd->associated) {
+ mutex_unlock(&ifmgd->mtx);
+ return;
+ }
+
+ if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
+ sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
+ mlme_dbg(sdata, "driver requested disconnect after resume\n");
+ ieee80211_sta_connection_lost(sdata,
+ ifmgd->associated->bssid,
+ WLAN_REASON_UNSPECIFIED,
+ true);
+ mutex_unlock(&ifmgd->mtx);
+ return;
+ }
+ mutex_unlock(&ifmgd->mtx);
+}
+#endif
+
/* interface setup */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx = !req->local_state_change;
- bool sent_frame = false;
+ bool report_frame = false;
mutex_lock(&ifmgd->mtx);
ieee80211_destroy_auth_data(sdata, false);
mutex_unlock(&ifmgd->mtx);
- sent_frame = tx;
+ report_frame = true;
goto out;
}
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, tx, frame_buf);
- sent_frame = tx;
+ report_frame = true;
}
mutex_unlock(&ifmgd->mtx);
out:
- if (sent_frame)
+ if (report_frame)
__cfg80211_send_deauth(sdata->dev, frame_buf,
IEEE80211_DEAUTH_FRAME_LEN);
struct ieee80211_sta *pubsta,
struct ieee80211_sta_rates *rates)
{
- struct ieee80211_sta_rates *old = rcu_dereference(pubsta->rates);
+ struct ieee80211_sta_rates *old;
+ /*
+ * mac80211 guarantees that this function will not be called
+ * concurrently, so the following RCU access is safe, even without
+ * extra locking. This cannot be checked easily, so we just set
+ * the condition to true.
+ */
+ old = rcu_dereference_protected(pubsta->rates, true);
rcu_assign_pointer(pubsta->rates, rates);
if (old)
kfree_rcu(old, rcu_head);
* and location updates. Note that mac80211
* itself never looks at these frames.
*/
+ if (!multicast &&
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1))
+ return 0;
if (ieee80211_is_public_action(hdr, skb->len))
return 1;
if (!ieee80211_is_beacon(hdr->frame_control))
u32 iv32 = get_unaligned_le32(&data[4]);
u16 iv16 = data[2] | (data[0] << 8);
- spin_lock_bh(&key->u.tkip.txlock);
+ spin_lock(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
tkip_mixing_phase2(tk, ctx, iv16, p2k);
- spin_unlock_bh(&key->u.tkip.txlock);
+ spin_unlock(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
mb();
local->resuming = false;
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ ieee80211_sta_restart(sdata);
+ }
+
mod_timer(&local->sta_cleanup, jiffies + 1);
#else
WARN_ON(1);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
+const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ipv6_ops);
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
return th->rst;
}
+static inline bool is_new_conn(const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph)
+{
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph, *th;
+
+ th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return false;
+ return th->syn;
+ }
+ case IPPROTO_SCTP: {
+ sctp_chunkhdr_t *sch, schunk;
+
+ sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
+ sizeof(schunk), &schunk);
+ if (sch == NULL)
+ return false;
+ return sch->type == SCTP_CID_INIT;
+ }
+ default:
+ return false;
+ }
+}
+
/* Handle response packets: rewrite addresses and send away...
*/
static unsigned int
* Check if the packet belongs to an existing connection entry
*/
cp = pp->conn_in_get(af, skb, &iph, 0);
+
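+ /* The entry may point at a quiesced destination (weight 0). If this
+ * packet starts a new connection (TCP SYN / SCTP INIT, see
+ * is_new_conn()) and expire_nodest_conn is set, expire the stale
+ * entry so the packet falls through to scheduling below and can be
+ * sent to an available server.
+ */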
+ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
+ is_new_conn(skb, &iph)) {
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+ cp = NULL;
+ }
+
if (unlikely(!cp) && !iph.fragoffs) {
/* No (second) fragments need to enter here, as nf_defrag_ipv6
* replayed fragment zero will already have created the cp
#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)
struct ip_vs_sh_state {
- struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
struct rcu_head rcu_head;
+ struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
};
/*
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
va_end(args);
- logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
+ logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
}
rcu_read_unlock();
}
return 0;
out_sysctl:
+#ifdef CONFIG_PROC_FS
/* For init_net: errors will trigger panic, don't unroll on error. */
if (!net_eq(net, &init_net))
remove_proc_entry("nf_log", net->nf.proc_netfilter);
-
+#endif
return ret;
}
static void __net_exit nf_log_net_exit(struct net *net)
{
netfilter_log_sysctl_exit(net);
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nf_log", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nf_log_net_ops = {
/* log handler for internal netfilter logging api */
void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct nf_loginfo *li;
unsigned int qthreshold;
unsigned int plen;
- struct net *net = dev_net(in ? in : out);
struct nfnl_log_net *log = nfnl_log_pernet(net);
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
static void __net_exit nfnl_log_net_exit(struct net *net)
{
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nfnl_log_net_ops = {
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nfnl_queue_net_ops = {
static void
-ipt_log_packet(u_int8_t pf,
+ipt_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const char *prefix)
{
struct sbuff *m;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
- if (!recurse && skb->mark)
+ if (recurse && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
}
}
static void
-ip6t_log_packet(u_int8_t pf,
+ip6t_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const char *prefix)
{
struct sbuff *m;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
{
const struct xt_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
+ struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
if (par->family == NFPROTO_IPV4)
- ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+ ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
- ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+ ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#endif
else
{
const struct xt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
+ struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
- nfulnl_log_packet(par->family, par->hooknum, skb, par->in,
+ nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return XT_CONTINUE;
}
static unsigned int
tcpoptstrip_mangle_packet(struct sk_buff *skb,
- const struct xt_tcpoptstrip_target_info *info,
+ const struct xt_action_param *par,
unsigned int tcphoff, unsigned int minlen)
{
+ const struct xt_tcpoptstrip_target_info *info = par->targinfo;
unsigned int optl, i, j;
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
+ int len;
+
+ /* This is a fragment, no TCP header is available */
+ if (par->fragoff != 0)
+ return XT_CONTINUE;
if (!skb_make_writable(skb, skb->len))
return NF_DROP;
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr) ||
+ tcp_hdr(skb)->doff * 4 > len)
+ return NF_DROP;
+
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
opt = (u_int8_t *)tcph;
static unsigned int
tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
- return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
+ return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb),
sizeof(struct iphdr) + sizeof(struct tcphdr));
}
if (tcphoff < 0)
return NF_DROP;
- return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff,
+ return tcpoptstrip_mangle_packet(skb, par, tcphoff,
sizeof(*ipv6h) + sizeof(struct tcphdr));
}
#endif
#include <net/ip6_fib.h>
#endif
+#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_addrtype.h>
#include <linux/netfilter/x_tables.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
- const struct in6_addr *addr)
+ const struct in6_addr *addr, u16 mask)
{
const struct nf_afinfo *afinfo;
struct flowi6 flow;
struct rt6_info *rt;
- u32 ret;
+ u32 ret = 0;
int route_err;
memset(&flow, 0, sizeof(flow));
rcu_read_lock();
afinfo = nf_get_afinfo(NFPROTO_IPV6);
- if (afinfo != NULL)
+ if (afinfo != NULL) {
+ const struct nf_ipv6_ops *v6ops;
+
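+ /* nf_ipv6_ops lets this module reach ipv6_chk_addr() without a
+ * hard dependency on the ipv6 module; presumably needed here
+ * because a route lookup bound to the interface cannot reliably
+ * report local addresses.
+ */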
+ if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
+ v6ops = nf_get_ipv6_ops();
+ if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+ ret = XT_ADDRTYPE_LOCAL;
+ }
route_err = afinfo->route(net, (struct dst_entry **)&rt,
- flowi6_to_flowi(&flow), !!dev);
- else
+ flowi6_to_flowi(&flow), false);
+ } else {
route_err = 1;
-
+ }
rcu_read_unlock();
if (route_err)
if (rt->rt6i_flags & RTF_REJECT)
ret = XT_ADDRTYPE_UNREACHABLE;
- else
- ret = 0;
- if (rt->rt6i_flags & RTF_LOCAL)
+ if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
ret |= XT_ADDRTYPE_LOCAL;
if (rt->rt6i_flags & RTF_ANYCAST)
ret |= XT_ADDRTYPE_ANYCAST;
-
dst_release(&rt->dst);
return ret;
}
if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
XT_ADDRTYPE_UNREACHABLE) & mask)
- return !!(mask & match_lookup_rt6(net, dev, addr));
+ return !!(mask & match_lookup_rt6(net, dev, addr, mask));
return true;
}
}
}
+/**
+ * netlbl_domhsh_validate - Validate a new domain mapping entry
+ * @entry: the entry to validate
+ *
+ * This function validates the new domain mapping entry to ensure that it is
+ * a valid entry. Returns zero on success, negative values on failure.
+ *
+ */
+static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
+{
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (entry->type_def.cipsov4 != NULL ||
+ entry->type_def.addrsel != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (entry->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ switch (map4->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (map4->type_def.cipsov4 != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (map4->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ switch (map6->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#endif /* IPv6 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Domain Hash Table Functions
*/
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
+ ret_val = netlbl_domhsh_validate(entry);
+ if (ret_val != 0)
+ return ret_val;
+
/* XXX - we can remove this RCU read lock as the spinlock protects the
* entire function, but before we do we need to fixup the
* netlbl_af[4,6]list RCU functions to do "the right thing" with
atomic_dec(&ring->pending);
sock_put(sk);
- skb->data = NULL;
+ skb->head = NULL;
}
#endif
if (skb->sk != NULL)
obj-$(CONFIG_NFC) += nfc.o
obj-$(CONFIG_NFC_NCI) += nci/
obj-$(CONFIG_NFC_HCI) += hci/
-#obj-$(CONFIG_NFC_LLCP) += llcp/
nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \
llcp_sock.o
}
if (R_tab) {
police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+ psched_ratecfg_precompute(&police->rate, &R_tab->rate);
qdisc_put_rtab(R_tab);
} else {
police->rate_present = false;
}
if (P_tab) {
police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+ psched_ratecfg_precompute(&police->peak, &P_tab->rate);
qdisc_put_rtab(P_tab);
} else {
police->peak_present = false;
};
if (police->rate_present)
- opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+ psched_ratecfg_getrate(&opt.rate, &police->rate);
if (police->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &police->peak);
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
if (police->tcfp_result &&
WARN_ON(timer_pending(&dev->watchdog_timer));
}
-void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf)
{
u64 factor;
u64 mult;
int shift;
- r->rate_bps = (u64)rate << 3;
- r->shift = 0;
+ memset(r, 0, sizeof(*r));
+ r->overhead = conf->overhead;
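+ /* tc_ratespec.rate is in bytes per second; keep rate_bps in bits
+ * per second as before (<< 3).
+ */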
+ r->rate_bps = (u64)conf->rate << 3;
r->mult = 1;
/*
* Calibrate mult, shift so that token counting is accurate
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
- psched_time_t pq_key;
+ s64 pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
/* token bucket parameters */
struct psched_ratecfg rate;
struct psched_ratecfg ceil;
- s64 buffer, cbuffer; /* token bucket depth/rate */
- psched_tdiff_t mbuffer; /* max wait time */
- s64 tokens, ctokens; /* current number of tokens */
- psched_time_t t_c; /* checkpoint time */
+ s64 buffer, cbuffer; /* token bucket depth/rate */
+ s64 mbuffer; /* max wait time */
+ s64 tokens, ctokens; /* current number of tokens */
+ s64 t_c; /* checkpoint time */
};
struct htb_sched {
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
- psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+ s64 near_ev_cache[TC_HTB_MAXDEPTH];
int defcls; /* class where unclassified flows go to */
/* filters for qdisc itself */
struct tcf_proto *filter_list;
- int rate2quantum; /* quant = rate / rate2quantum */
- psched_time_t now; /* cached dequeue time */
+ int rate2quantum; /* quant = rate / rate2quantum */
+ s64 now; /* cached dequeue time */
struct qdisc_watchdog watchdog;
/* non shaped skbs; let them go directly thru */
* next pending event (0 for no event in pq, q->now for too many events).
* Note: Applied are events whose have cl->pq_key <= q->now.
*/
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
- unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+ unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
* 1 to simplify things when jiffy is going to be incremented
struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
- psched_time_t next_event;
+ s64 next_event;
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
- psched_time_t event;
+ s64 event;
if (q->now >= q->near_ev_cache[level]) {
event = htb_do_events(q, level, start_at);
memset(&opt, 0, sizeof(opt));
- opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+ psched_ratecfg_getrate(&opt.rate, &cl->rate);
opt.buffer = PSCHED_NS2TICKS(cl->buffer);
- opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+ psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
opt.quantum = cl->quantum;
opt.prio = cl->prio;
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = cl->tokens;
- cl->xstats.ctokens = cl->ctokens;
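+ /* tokens/ctokens are now tracked in nanoseconds; convert back to
+ * psched ticks so the values exported to userspace keep their old
+ * units.
+ */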
+ cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+ cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
- parent->t_c = psched_get_time();
+ parent->t_c = ktime_to_ns(ktime_get());
parent->cmode = HTB_CAN_SEND;
}
/* set class to be in HTB_CAN_SEND state */
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
- cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
- cl->t_c = psched_get_time();
+ cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
+ cl->t_c = ktime_to_ns(ktime_get());
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
cl->prio = TC_HTB_NUMPRIO - 1;
}
- psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
- psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
q->tokens = q->buffer;
q->ptokens = q->mtu;
- psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+ psched_ratecfg_precompute(&q->rate, &rtab->rate);
if (ptab) {
- psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+ psched_ratecfg_precompute(&q->peak, &ptab->rate);
q->peak_present = true;
} else {
q->peak_present = false;
goto nla_put_failure;
opt.limit = q->limit;
- opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+ psched_ratecfg_getrate(&opt.rate, &q->rate);
if (q->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &q->peak);
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
opt.mtu = PSCHED_NS2TICKS(q->mtu);
unsigned int name_len;
};
-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
{
* BSD sendmsg interface
*/
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+{
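+ /* MSG_CMSG_COMPAT is set internally by the compat entry points to
+ * flag 32-bit cmsg layout; refuse it from userspace so native
+ * callers cannot force the compat parsing. The same check is added
+ * to the sendmmsg/recvmsg/recvmmsg entry points below.
+ */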
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, msg, flags);
+}
+
/*
* Linux sendmmsg interface
*/
while (datagrams < vlen) {
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, mmsg, vlen, flags);
}
-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
* BSD recvmsg interface
*/
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
- unsigned int, flags)
+long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, msg, flags);
+}
+
/*
* Linux recvmmsg interface
*/
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
int datagrams;
struct timespec timeout_sys;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (!timeout)
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
+#include "../netns.h"
+
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
};
/* pipe_version >= 0 if and only if someone has a pipe open. */
-static int pipe_version = -1;
-static atomic_t pipe_users = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
char databuf[UPCALL_BUF_LEN];
};
-static int get_pipe_version(void)
+static int get_pipe_version(struct net *net)
{
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret;
spin_lock(&pipe_version_lock);
- if (pipe_version >= 0) {
- atomic_inc(&pipe_users);
- ret = pipe_version;
+ if (sn->pipe_version >= 0) {
+ atomic_inc(&sn->pipe_users);
+ ret = sn->pipe_version;
} else
ret = -EAGAIN;
spin_unlock(&pipe_version_lock);
return ret;
}
-static void put_pipe_version(void)
+static void put_pipe_version(struct net *net)
{
- if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
- pipe_version = -1;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
+ sn->pipe_version = -1;
spin_unlock(&pipe_version_lock);
}
}
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
+ struct net *net = rpc_net_ns(gss_msg->auth->client);
if (!atomic_dec_and_test(&gss_msg->count))
return;
- put_pipe_version();
+ put_pipe_version(net);
BUG_ON(!list_empty(&gss_msg->list));
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
struct rpc_clnt *clnt,
const char *service_name)
{
- if (pipe_version == 0)
+ struct net *net = rpc_net_ns(clnt);
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ if (sn->pipe_version == 0)
gss_encode_v0_msg(gss_msg);
else /* pipe_version == 1 */
gss_encode_v1_msg(gss_msg, clnt, service_name);
gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
if (gss_msg == NULL)
return ERR_PTR(-ENOMEM);
- vers = get_pipe_version();
+ vers = get_pipe_version(rpc_net_ns(clnt));
if (vers < 0) {
kfree(gss_msg);
return ERR_PTR(vers);
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
+ struct net *net = rpc_net_ns(gss_auth->client);
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
struct rpc_pipe *pipe;
struct rpc_cred *cred = &gss_cred->gc_base;
struct gss_upcall_msg *gss_msg;
+ unsigned long timeout;
DEFINE_WAIT(wait);
- int err = 0;
+ int err;
dprintk("RPC: %s for uid %u\n",
__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
+ err = 0;
+ /* Default timeout is 15s unless we know that gssd is not running */
+ timeout = 15 * HZ;
+ if (!sn->gssd_running)
+ timeout = HZ >> 2;
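+ /* HZ >> 2 is a quarter second: an earlier upcall already timed out
+ * and cleared gssd_running (see below), so fail fast rather than
+ * blocking the caller for the full 15 seconds.
+ */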
gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
err = wait_event_interruptible_timeout(pipe_version_waitqueue,
- pipe_version >= 0, 15*HZ);
- if (pipe_version < 0) {
+ sn->pipe_version >= 0, timeout);
+ if (sn->pipe_version < 0) {
+ if (err == 0)
+ sn->gssd_running = 0;
warn_gssd();
err = -EACCES;
}
- if (err)
+ if (err < 0)
goto out;
goto retry;
}
static int gss_pipe_open(struct inode *inode, int new_version)
{
+ struct net *net = inode->i_sb->s_fs_info;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret = 0;
spin_lock(&pipe_version_lock);
- if (pipe_version < 0) {
+ if (sn->pipe_version < 0) {
/* First open of any gss pipe determines the version: */
- pipe_version = new_version;
+ sn->pipe_version = new_version;
rpc_wake_up(&pipe_version_rpc_waitqueue);
wake_up(&pipe_version_waitqueue);
- } else if (pipe_version != new_version) {
+ } else if (sn->pipe_version != new_version) {
/* Trying to open a pipe of a different version */
ret = -EBUSY;
goto out;
}
- atomic_inc(&pipe_users);
+ atomic_inc(&sn->pipe_users);
out:
spin_unlock(&pipe_version_lock);
return ret;
static void
gss_pipe_release(struct inode *inode)
{
+ struct net *net = inode->i_sb->s_fs_info;
struct rpc_pipe *pipe = RPC_I(inode)->pipe;
struct gss_upcall_msg *gss_msg;
}
spin_unlock(&pipe->lock);
- put_pipe_version();
+ put_pipe_version(net);
}
static void
#ifdef CONFIG_PROC_FS
-static bool set_gss_proxy(struct net *net, int type)
+static int set_gss_proxy(struct net *net, int type)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret = 0;
return false;
}
-static int wait_for_gss_proxy(struct net *net)
+static int wait_for_gss_proxy(struct net *net, struct file *file)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
+ return -EAGAIN;
return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
}
size_t len;
int ret;
- ret = wait_for_gss_proxy(net);
+ ret = wait_for_gss_proxy(net, file);
if (ret)
return ret;
wait_queue_head_t gssp_wq;
struct rpc_clnt *gssp_clnt;
int use_gss_proxy;
+ int pipe_version;
+ atomic_t pipe_users;
struct proc_dir_entry *use_gssp_proc;
+
+ unsigned int gssd_running;
};
extern int sunrpc_net_id;
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
+ struct net *net = inode->i_sb->s_fs_info;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
struct rpc_pipe *pipe;
int first_open;
int res = -ENXIO;
mutex_lock(&inode->i_mutex);
+ sn->gssd_running = 1;
pipe = RPC_I(inode)->pipe;
if (pipe == NULL)
goto out;
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_init(&sn->pipefs_sb_lock);
+ sn->gssd_running = 1;
+ sn->pipe_version = -1;
}
/*
* Note: If the task is ASYNC, and is being made runnable after sitting on an
* rpc_wait_queue, this must be called with the queue spinlock held to protect
* the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
+ bool need_wakeup = !rpc_test_and_set_running(task);
+
rpc_clear_queued(task);
- if (rpc_test_and_set_running(task))
+ if (!need_wakeup)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
goto badcred;
argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
argv->iov_len -= slen*4;
-
+ /*
+ * Note: we skip uid_valid()/gid_valid() checks here for
+ * backwards compatibility with clients that use -1 ids.
+ * Instead, -1 uid or gid is later mapped to the
+ * (export-specific) anonymous id by nfsd_setuser.
+ * Supplementary gids will be left alone.
+ */
cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
- if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
- goto badcred;
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
return SVC_CLOSE;
for (i = 0; i < slen; i++) {
kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
- if (!gid_valid(kgid))
- goto badcred;
GROUP_AT(cred->cr_group_info, i) = kgid;
}
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
* cfg80211_mutex lock
*/
res = rfkill_register(rdev->rfkill);
- if (res)
- goto out_rm_dev;
+ if (res) {
+ device_del(&rdev->wiphy.dev);
+
+ mutex_lock(&cfg80211_mutex);
+ debugfs_remove_recursive(rdev->wiphy.debugfsdir);
+ list_del_rcu(&rdev->list);
+ wiphy_regulatory_deregister(wiphy);
+ mutex_unlock(&cfg80211_mutex);
+ return res;
+ }
rtnl_lock();
rdev->wiphy.registered = true;
rtnl_unlock();
return 0;
-
-out_rm_dev:
- device_del(&rdev->wiphy.dev);
- return res;
}
EXPORT_SYMBOL(wiphy_register);
#endif
__cfg80211_disconnect(rdev, dev,
WLAN_REASON_DEAUTH_LEAVING, true);
- cfg80211_mlme_down(rdev, dev);
wdev_unlock(wdev);
break;
case NL80211_IFTYPE_MESH_POINT:
(u32)sinfo->rx_bytes))
goto nla_put_failure;
if ((sinfo->filled & (STATION_INFO_TX_BYTES |
- NL80211_STA_INFO_TX_BYTES64)) &&
+ STATION_INFO_TX_BYTES64)) &&
nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
(u32)sinfo->tx_bytes))
goto nla_put_failure;
&tcp->payload_tok))
return -ENOBUFS;
+ nla_nest_end(msg, nl_tcp);
+
return 0;
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
(ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
mutex_lock(&rdev->sched_scan_mtx);
list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
+
wdev_lock(wdev);
if (!netif_running(wdev->netdev)) {
wdev_unlock(wdev);
/* was it connected by userspace SME? */
if (!wdev->conn) {
cfg80211_mlme_down(rdev, dev);
- return 0;
+ goto disconnect;
}
if (wdev->sme_state == CFG80211_SME_CONNECTING &&
return err;
}
+ disconnect:
if (wdev->sme_state == CFG80211_SME_CONNECTED)
__cfg80211_disconnected(dev, NULL, 0, 0, false);
else if (wdev->sme_state == CFG80211_SME_CONNECTING)
TP_STRUCT__entry(
WIPHY_ENTRY
WDEV_ENTRY
+ __field(bool, non_wireless)
__field(bool, disconnect)
__field(bool, magic_pkt)
__field(bool, gtk_rekey_failure)
__field(bool, rfkill_release)
__field(s32, pattern_idx)
__field(u32, packet_len)
- __dynamic_array(u8, packet, wakeup->packet_present_len)
+ __dynamic_array(u8, packet,
+ wakeup ? wakeup->packet_present_len : 0)
),
TP_fast_assign(
WIPHY_ASSIGN;
WDEV_ASSIGN;
- __entry->disconnect = wakeup->disconnect;
- __entry->magic_pkt = wakeup->magic_pkt;
- __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure;
- __entry->eap_identity_req = wakeup->eap_identity_req;
- __entry->four_way_handshake = wakeup->four_way_handshake;
- __entry->rfkill_release = wakeup->rfkill_release;
- __entry->pattern_idx = wakeup->pattern_idx;
- __entry->packet_len = wakeup->packet_len;
- if (wakeup->packet && wakeup->packet_present_len)
+ __entry->non_wireless = !wakeup;
+ __entry->disconnect = wakeup ? wakeup->disconnect : false;
+ __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false;
+ __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false;
+ __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false;
+ __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false;
+ __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false;
+ __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false;
+ __entry->packet_len = wakeup ? wakeup->packet_len : false;
+ if (wakeup && wakeup->packet && wakeup->packet_present_len)
memcpy(__get_dynamic_array(packet), wakeup->packet,
wakeup->packet_present_len);
),
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+ err = -EINVAL;
goto error;
}
}
}
-static void xfrm_garbage_collect(struct net *net)
+void xfrm_garbage_collect(struct net *net)
{
flow_cache_flush();
__xfrm_garbage_collect(net);
}
+EXPORT_SYMBOL(xfrm_garbage_collect);
static void xfrm_garbage_collect_deferred(struct net *net)
{
out:
xfrm_pol_put(xp);
+ if (delete && err == 0)
+ xfrm_garbage_collect(net);
return err;
}
quiet_cmd_dtc = DTC $@
cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
$(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
- -i $(srctree)/arch/$(SRCARCH)/boot/dts $(DTC_FLAGS) \
+ -i $(dir $<) $(DTC_FLAGS) \
-d $(depfile).dtc $(dtc-tmp) ; \
cat $(depfile).pre $(depfile).dtc > $(depfile)
;;
--refresh)
;;
- --*-after)
+ --*-after|-E|-D|-M)
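+ # -E, -D and -M behave like the long --*-after options and
+ # therefore consume two option-name arguments.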
checkarg "$1"
A=$ARG
checkarg "$2"
}
}
- if (i < max_choice ||
- key == KEY_UP || key == KEY_DOWN ||
- key == '-' || key == '+' ||
- key == KEY_PPAGE || key == KEY_NPAGE) {
+ if (item_count() != 0 &&
+ (i < max_choice ||
+ key == KEY_UP || key == KEY_DOWN ||
+ key == '-' || key == '+' ||
+ key == KEY_PPAGE || key == KEY_NPAGE)) {
/* Remove highlight of current item */
print_item(scroll + choice, choice, FALSE);
active_menu, &s_scroll);
if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL)
break;
- if (!item_activate_selected())
- continue;
- if (!item_tag())
- continue;
-
+ if (item_count() != 0) {
+ if (!item_activate_selected())
+ continue;
+ if (!item_tag())
+ continue;
+ }
submenu = item_data();
active_menu = item_data();
if (submenu)
struct menu *menu = current_entry;
while ((menu = menu->parent) != NULL) {
+ struct expr *dup_expr;
+
if (!menu->visibility)
continue;
+ /*
+ * Do not add a reference to the
+ * menu's visibility expression but
+ * use a copy of it. Otherwise the
+ * expression reduction functions
+ * will modify expressions that have
+ * multiple references which can
+ * cause unwanted side effects.
+ */
+ dup_expr = expr_copy(menu->visibility);
+
prop->visible.expr
= expr_alloc_and(prop->visible.expr,
- menu->visibility);
+ dup_expr);
}
}
mv -f $(objtree)/.tmp_version $(objtree)/.version
$(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
- $(UTS_MACHINE) -bb $<
+ $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
rm binkernel.spec
# Deb target
memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
+ atomic_inc(&selinux_xfrm_refcount);
*new_ctxp = new_ctx;
}
return 0;
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
+ atomic_dec(&selinux_xfrm_refcount);
kfree(ctx);
}
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
const struct task_security_struct *tsec = current_security();
- int rc = 0;
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
+ if (!ctx)
+ return 0;
- return rc;
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
}
/*
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
- struct xfrm_sec_ctx *ctx = x->security;
- kfree(ctx);
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(x->security);
}
/*
{
const struct task_security_struct *tsec = current_security();
struct xfrm_sec_ctx *ctx = x->security;
- int rc = 0;
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
+ if (!ctx)
+ return 0;
- return rc;
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
}
/*
MODULE_ALIAS("aoa-device-id-14");
MODULE_ALIAS("aoa-device-id-22");
MODULE_ALIAS("aoa-device-id-35");
+MODULE_ALIAS("aoa-device-id-44");
/* onyx with all but microphone connected */
static struct codec_connection onyx_connections_nomic[] = {
.connections = tas_connections_nolineout,
},
},
+ /* PowerBook6,5 */
+ { .device_id = 44,
+ .codecs[0] = {
+ .name = "tas",
+ .connections = tas_connections_all,
+ },
+ },
/* PowerBook6,7 */
{ .layout_id = 80,
.codecs[0] = {
* We probably cannot handle all device-id machines,
* so restrict to those we do handle for now.
*/
- if (id && (*id == 22 || *id == 14 || *id == 35)) {
+ if (id && (*id == 22 || *id == 14 || *id == 35 ||
+ *id == 44)) {
snprintf(dev->sound.modalias, 32,
"aoa-device-id-%d", *id);
ok = 1;
menuconfig SOUND_OSS
tristate "OSS sound modules"
depends on ISA_DMA_API && VIRT_TO_BUS
- depends on !ISA_DMA_SUPPORT_BROKEN
+ depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
help
OSS is the Open Sound System suite of sound card drivers. They make
sound programming easier since they provide a common API. Say Y or
return false;
}
+/* check whether the NID is referred by any active paths */
+#define is_active_nid_for_any(codec, nid) \
+ is_active_nid(codec, nid, HDA_OUTPUT, 0)
+
/* get the default amp value for the target state */
static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
int dir, unsigned int caps, bool enable)
for (i = 0; i < path->depth; i++) {
hda_nid_t nid = path->path[i];
- if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) {
+ if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
+ !is_active_nid_for_any(codec, nid)) {
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
return;
if (codec->inv_eapd)
enable = !enable;
+ if (spec->keep_eapd_on && !enable)
+ return;
snd_hda_codec_update_cache(codec, pin, 0,
AC_VERB_SET_EAPD_BTLENABLE,
enable ? 0x02 : 0x00);
* independent HP controls
*/
-/* update HP auto-mute state too */
-static void update_hp_automute_hook(struct hda_codec *codec)
-{
- struct hda_gen_spec *spec = codec->spec;
-
- if (spec->hp_automute_hook)
- spec->hp_automute_hook(codec, NULL);
- else
- snd_hda_gen_hp_automute(codec, NULL);
-}
-
+static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack);
static int indep_hp_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
else
*dacp = spec->alt_dac_nid;
- update_hp_automute_hook(codec);
+ call_hp_automute(codec, NULL);
ret = 1;
}
unlock:
else
val = PIN_HP;
set_pin_target(codec, pin, val, true);
- update_hp_automute_hook(codec);
+ call_hp_automute(codec, NULL);
}
}
val = snd_hda_get_default_vref(codec, nid);
}
snd_hda_set_pin_ctl_cache(codec, nid, val);
- update_hp_automute_hook(codec);
+ call_hp_automute(codec, NULL);
return 1;
}
}
EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch);
-/* update jack retasking */
-static void update_automute_all(struct hda_codec *codec)
+/* call appropriate hooks */
+static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
{
struct hda_gen_spec *spec = codec->spec;
+ if (spec->hp_automute_hook)
+ spec->hp_automute_hook(codec, jack);
+ else
+ snd_hda_gen_hp_automute(codec, jack);
+}
- update_hp_automute_hook(codec);
+static void call_line_automute(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ struct hda_gen_spec *spec = codec->spec;
if (spec->line_automute_hook)
- spec->line_automute_hook(codec, NULL);
+ spec->line_automute_hook(codec, jack);
else
- snd_hda_gen_line_automute(codec, NULL);
+ snd_hda_gen_line_automute(codec, jack);
+}
+
+static void call_mic_autoswitch(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ struct hda_gen_spec *spec = codec->spec;
if (spec->mic_autoswitch_hook)
- spec->mic_autoswitch_hook(codec, NULL);
+ spec->mic_autoswitch_hook(codec, jack);
else
- snd_hda_gen_mic_autoswitch(codec, NULL);
+ snd_hda_gen_mic_autoswitch(codec, jack);
+}
+
+/* update jack retasking */
+static void update_automute_all(struct hda_codec *codec)
+{
+ call_hp_automute(codec, NULL);
+ call_line_automute(codec, NULL);
+ call_mic_autoswitch(codec, NULL);
}
/*
snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n",
nid);
snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT,
- spec->hp_automute_hook ?
- spec->hp_automute_hook :
- snd_hda_gen_hp_automute);
+ call_hp_automute);
spec->detect_hp = 1;
}
snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid);
snd_hda_jack_detect_enable_callback(codec, nid,
HDA_GEN_FRONT_EVENT,
- spec->line_automute_hook ?
- spec->line_automute_hook :
- snd_hda_gen_line_automute);
+ call_line_automute);
spec->detect_lo = 1;
}
spec->automute_lo_possible = spec->detect_hp;
snd_hda_jack_detect_enable_callback(codec,
spec->am_entry[i].pin,
HDA_GEN_MIC_EVENT,
- spec->mic_autoswitch_hook ?
- spec->mic_autoswitch_hook :
- snd_hda_gen_mic_autoswitch);
+ call_mic_autoswitch);
return true;
}
return power_state;
if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
return power_state;
- if (is_active_nid(codec, nid, HDA_OUTPUT, 0))
+ if (is_active_nid_for_any(codec, nid))
return power_state;
return AC_PWRST_D3;
}
unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
unsigned int own_eapd_ctl:1; /* set EAPD by own function */
+ unsigned int keep_eapd_on:1; /* don't turn off EAPD automatically */
unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
unsigned int indep_hp:1; /* independent HP supported */
unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
spec->codec_type = VT1708S;
spec->no_pin_power_ctl = 1;
spec->gen.indep_hp = 1;
+ spec->gen.keep_eapd_on = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
return spec;
}
static void set_widgets_power_state(struct hda_codec *codec)
{
+#if 0 /* FIXME: the assumed connections don't match always with the
+ * actual routes by the generic parser, so better to disable
+ * the control for safety.
+ */
struct via_spec *spec = codec->spec;
if (spec->set_widgets_power_state)
spec->set_widgets_power_state(codec);
+#endif
}
static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
/* Fix pop noise on headphones */
int i;
for (i = 0; i < spec->gen.autocfg.hp_outs; i++)
- snd_hda_set_pin_ctl(codec, spec->gen.autocfg.hp_pins[i], 0);
+ snd_hda_codec_write(codec, spec->gen.autocfg.hp_pins[i],
+ 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ 0x00);
}
return 0;
if (rc)
goto error_out;
- if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) {
+ rc = pci_set_dma_mask(pci, DMA_BIT_MASK(30));
+ if (rc < 0) {
dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA");
goto error_out_enabled;
}
/* AB8500_ADSLOTSELX */
#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x10
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x20
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x30
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x40
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x50
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x60
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x70
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x80
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0
#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x08
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0x0F
#define AB8500_ADSLOTSELX_EVEN_SHIFT 0
#define AB8500_ADSLOTSELX_ODD_SHIFT 4
{ CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */
{ CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */
{ CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */
- { CS42L52_MASTERA_VOL, 0x88 }, /* r20 Master A Volume */
+ { CS42L52_MASTERA_VOL, 0x00 }, /* r20 Master A Volume */
{ CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */
{ CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */
{ CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */
};
static const struct soc_enum mic_bias_level_enum =
- SOC_ENUM_SINGLE(CS42L52_IFACE_CTL1, 0,
+ SOC_ENUM_SINGLE(CS42L52_IFACE_CTL2, 0,
ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text);
static const char * const cs42l52_mic_text[] = { "Single", "Differential" };
SOC_ENUM("Headphone Analog Gain", hp_gain_enum),
SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL,
- CS42L52_SPKB_VOL, 7, 0x1, 0xff, hl_tlv),
+ CS42L52_SPKB_VOL, 0, 0x1, 0xff, hl_tlv),
SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv),
SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume",
CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL,
- 6, 0x7f, 0x19, hl_tlv),
+ 0, 0x7f, 0x19, hl_tlv),
SOC_DOUBLE_R("PCM Mixer Switch",
CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1),
#define CS42L52_PB_CTL1_INV_PCMA (1 << 2)
#define CS42L52_PB_CTL1_MSTB_MUTE (1 << 1)
#define CS42L52_PB_CTL1_MSTA_MUTE (1 << 0)
-#define CS42L52_PB_CTL1_MUTE_MASK 0xFFFD
+#define CS42L52_PB_CTL1_MUTE_MASK 0x03
#define CS42L52_PB_CTL1_MUTE 3
#define CS42L52_PB_CTL1_UNMUTE 0
DA7213_DMIC_DATA_SEL_SHIFT);
break;
}
- switch (pdata->dmic_data_sel) {
+ switch (pdata->dmic_samplephase) {
case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
- dmic_cfg |= (pdata->dmic_data_sel <<
+ dmic_cfg |= (pdata->dmic_samplephase <<
DA7213_DMIC_SAMPLEPHASE_SHIFT);
break;
}
- switch (pdata->dmic_data_sel) {
+ switch (pdata->dmic_clk_rate) {
case DA7213_DMIC_CLK_3_0MHZ:
case DA7213_DMIC_CLK_1_5MHZ:
- dmic_cfg |= (pdata->dmic_data_sel <<
+ dmic_cfg |= (pdata->dmic_clk_rate <<
DA7213_DMIC_CLK_RATE_SHIFT);
break;
}
dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
ret = request_threaded_irq(max98090->irq, NULL,
- max98090_interrupt, IRQF_TRIGGER_FALLING,
+ max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"max98090_interrupt", codec);
if (ret < 0) {
dev_err(codec->dev, "request_irq failed: %d\n",
/* On wm0010 only the CLKCTRL1 value is used */
pll_rec.clkctrl1 = wm0010->pll_clkctrl1;
+ ret = -ENOMEM;
len = pll_rec.length + 8;
out = kzalloc(len, GFP_KERNEL);
if (!out) {
ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
if (ret != 0)
return ret;
+ arizona_init_spk(codec);
+
snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
priv->core.arizona->dapm = &codec->dapm;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int drc = wm8994_get_drc(kcontrol->id.name);
+ if (drc < 0)
+ return drc;
ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc];
return 0;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
+ if (block < 0)
+ return block;
+
ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
return 0;
{
struct snd_soc_codec *codec = w->codec;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994 *control = codec->control_data;
+ struct wm8994 *control = wm8994->wm8994;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
int i;
int dac;
dev_dbg(codec->dev, "Ignoring removed jack\n");
return IRQ_HANDLED;
}
+ } else if (!(reg & WM8958_MICD_STS)) {
+ snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+ SND_JACK_MECHANICAL | SND_JACK_HEADSET |
+ wm8994->btn_mask);
+ goto out;
}
if (wm8994->mic_detecting)
int word_length)
{
u32 fmt;
- u32 rotate = (word_length / 4) & 0x7;
+ u32 tx_rotate = (word_length / 4) & 0x7;
+ u32 rx_rotate = (32 - word_length) / 4;
u32 mask = (1ULL << word_length) - 1;
/*
mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
TXSSZ(fmt), TXSSZ(0x0F));
mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
- TXROT(rotate), TXROT(7));
+ TXROT(tx_rotate), TXROT(7));
mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
- RXROT(rotate), RXROT(7));
+ RXROT(rx_rotate), RXROT(7));
mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
mask);
}
clk_prepare_enable(ssi->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto failed_get_resource;
- }
-
ssi->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ssi->base)) {
ret = PTR_ERR(ssi->base);
snd_soc_unregister_component(&pdev->dev);
failed_register:
release_mem_region(res->start, resource_size(res));
-failed_get_resource:
clk_disable_unprepare(ssi->clk);
failed_clk:
dev_set_drvdata(&pdev->dev, priv);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "platform_get_resource failed\n");
- return -ENXIO;
- }
-
priv->io = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->io))
return PTR_ERR(priv->io);
goto err;
}
- snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
- SND_SOC_DAPM_STREAM_START);
+ if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+ SND_SOC_DAPM_STREAM_START);
+ else
+ snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
+ SND_SOC_DAPM_STREAM_START);
/* cancel any delayed stream shutdown that is pending */
rtd->pop_wait = 0;
0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */
};
-static const u8 known_fw_versions[][4] = {
- { 0x03, 0x01, 0x0b, 0x00 }
+static const u8 known_fw_versions[][2] = {
+ { 0x03, 0x01 }
};
struct ihex_record {
int i;
for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++)
- if (!memcmp(version, known_fw_versions + i, 4))
+ if (!memcmp(version, known_fw_versions + i, 2))
return 0;
snd_printk(KERN_ERR PREFIX "invalid firmware version in device: %*ph. "
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
case USB_ID(0x046d, 0x0991):
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
}
static void proc_dump_ep_status(struct snd_usb_substream *subs,
- struct snd_usb_endpoint *ep,
+ struct snd_usb_endpoint *data_ep,
+ struct snd_usb_endpoint *sync_ep,
struct snd_info_buffer *buffer)
{
- if (!ep)
+ if (!data_ep)
return;
- snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize);
+ snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize);
snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
subs->speed == USB_SPEED_FULL
- ? get_full_speed_hz(ep->freqm)
- : get_high_speed_hz(ep->freqm),
- ep->freqm >> 16, ep->freqm & 0xffff);
- if (ep->freqshift != INT_MIN) {
- int res = 16 - ep->freqshift;
+ ? get_full_speed_hz(data_ep->freqm)
+ : get_high_speed_hz(data_ep->freqm),
+ data_ep->freqm >> 16, data_ep->freqm & 0xffff);
+ if (sync_ep && data_ep->freqshift != INT_MIN) {
+ int res = 16 - data_ep->freqshift;
snd_iprintf(buffer, " Feedback Format = %d.%d\n",
- (ep->syncmaxsize > 3 ? 32 : 24) - res, res);
+ (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
}
}
snd_iprintf(buffer, " Status: Running\n");
snd_iprintf(buffer, " Interface = %d\n", subs->interface);
snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx);
- proc_dump_ep_status(subs, subs->data_endpoint, buffer);
- proc_dump_ep_status(subs, subs->sync_endpoint, buffer);
+ proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
} else {
snd_iprintf(buffer, " Status: Stop\n");
}
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
},
{
- USB_DEVICE(0x046d, 0x0990),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x046d,
+ .idProduct = 0x0990,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
.vendor_name = "Logitech, Inc.",
.product_name = "QuickCam Pro 9000",
USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
.ifnum = 0,
- .type = QUIRK_MIDI_STANDARD_INTERFACE
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
}
},
{
def get_kallsyms_table():
global kallsyms
+
try:
f = open("/proc/kallsyms", "r")
- linecount = 0
- for line in f:
- linecount = linecount+1
- f.seek(0)
except:
return
-
- j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
- j = j +1
- if ((j % 100) == 0):
- print "\r" + str(j) + "/" + str(linecount),
- kallsyms.append({ 'loc': loc, 'name' : name})
-
- print "\r" + str(j) + "/" + str(linecount)
+ kallsyms.append((loc, name))
kallsyms.sort()
- return
def get_sym(sloc):
loc = int(sloc)
- for i in kallsyms:
- if (i['loc'] >= loc):
- return (i['name'], i['loc']-loc)
- return (None, 0)
+
+ # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+ # kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+ start, end = -1, len(kallsyms)
+ while end != start + 1:
+ pivot = (start + end) // 2
+ if loc < kallsyms[pivot][0]:
+ end = pivot
+ else:
+ start = pivot
+
+ # Now (start == -1 or kallsyms[start][0] <= loc)
+ # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+ if start >= 0:
+ symloc, name = kallsyms[start]
+ return (name, loc - symloc)
+ else:
+ return (None, 0)
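
For readers following the new lookup, the rewritten get_sym() is a lower-bound binary search over the sorted (address, name) pairs: it finds the last symbol whose start address is not greater than the queried location. Below is a self-contained C sketch of the same search; the table contents are invented purely for illustration.

    /* Stand-alone sketch of the lower-bound search used by get_sym() above:
     * find the last entry whose address is <= loc. Addresses and names
     * here are made up for illustration.
     */
    #include <stdio.h>

    struct sym { unsigned long loc; const char *name; };

    static const struct sym table[] = {
        { 0x1000, "sym_a" },
        { 0x2000, "sym_b" },
        { 0x3000, "sym_c" },
    };

    static const struct sym *lookup(unsigned long loc)
    {
        int start = -1;
        int end = (int)(sizeof(table) / sizeof(table[0]));

        /* Invariant: table[i].loc <= loc for all i <= start,
         *            table[i].loc >  loc for all i >= end.   */
        while (end != start + 1) {
            int pivot = (start + end) / 2;

            if (loc < table[pivot].loc)
                end = pivot;
            else
                start = pivot;
        }
        return start >= 0 ? &table[start] : NULL;
    }

    int main(void)
    {
        const struct sym *s = lookup(0x2010);

        if (s)
            printf("%s+0x%lx\n", s->name, 0x2010 - s->loc); /* sym_b+0x10 */
        return 0;
    }

Here lookup(0x2010) lands on sym_b with offset 0x10, mirroring the (name, loc - symloc) tuple returned by the Python function.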
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
- skbaddr, protocol, location):
+ skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
TARGETS += mqueue
TARGETS += net
TARGETS += ptrace
-TARGETS += soft-dirty
TARGETS += vm
all:
+++ /dev/null
-CFLAGS += -iquote../../../../include/uapi -Wall
-soft-dirty: soft-dirty.c
-
-all: soft-dirty
-
-clean:
- rm -f soft-dirty
-
-run_tests: all
- @./soft-dirty || echo "soft-dirty selftests: [FAIL]"
+++ /dev/null
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-
-typedef unsigned long long u64;
-
-#define PME_PRESENT (1ULL << 63)
-#define PME_SOFT_DIRTY (1Ull << 55)
-
-#define PAGES_TO_TEST 3
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
-static void get_pagemap2(char *mem, u64 *map)
-{
- int fd;
-
- fd = open("/proc/self/pagemap2", O_RDONLY);
- if (fd < 0) {
- perror("Can't open pagemap2");
- exit(1);
- }
-
- lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET);
- read(fd, map, sizeof(u64) * PAGES_TO_TEST);
- close(fd);
-}
-
-static inline char map_p(u64 map)
-{
- return map & PME_PRESENT ? 'p' : '-';
-}
-
-static inline char map_sd(u64 map)
-{
- return map & PME_SOFT_DIRTY ? 'd' : '-';
-}
-
-static int check_pte(int step, int page, u64 *map, u64 want)
-{
- if ((map[page] & want) != want) {
- printf("Step %d Page %d has %c%c, want %c%c\n",
- step, page,
- map_p(map[page]), map_sd(map[page]),
- map_p(want), map_sd(want));
- return 1;
- }
-
- return 0;
-}
-
-static void clear_refs(void)
-{
- int fd;
- char *v = "4";
-
- fd = open("/proc/self/clear_refs", O_WRONLY);
- if (write(fd, v, 3) < 3) {
- perror("Can't clear soft-dirty bit");
- exit(1);
- }
- close(fd);
-}
-
-int main(void)
-{
- char *mem, x;
- u64 map[PAGES_TO_TEST];
-
- mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
-
- x = mem[0];
- mem[2 * PAGE_SIZE] = 'c';
- get_pagemap2(mem, map);
-
- if (check_pte(1, 0, map, PME_PRESENT))
- return 1;
- if (check_pte(1, 1, map, 0))
- return 1;
- if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY))
- return 1;
-
- clear_refs();
- get_pagemap2(mem, map);
-
- if (check_pte(2, 0, map, PME_PRESENT))
- return 1;
- if (check_pte(2, 1, map, 0))
- return 1;
- if (check_pte(2, 2, map, PME_PRESENT))
- return 1;
-
- mem[0] = 'a';
- mem[PAGE_SIZE] = 'b';
- x = mem[2 * PAGE_SIZE];
- get_pagemap2(mem, map);
-
- if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY))
- return 1;
- if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY))
- return 1;
- if (check_pte(3, 2, map, PME_PRESENT))
- return 1;
-
- (void)x; /* gcc warn */
-
- printf("PASS\n");
- return 0;
-}